Oct 02 10:52:45 crc systemd[1]: Starting Kubernetes Kubelet...
Oct 02 10:52:45 crc restorecon[4557]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16
system_u:object_r:container_file_t:s0:c0,c16 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Oct 02 10:52:45 crc restorecon[4557]: 
/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 02 10:52:45 crc restorecon[4557]: 
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:45 crc restorecon[4557]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 02 10:52:45 crc 
restorecon[4557]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Oct 02 10:52:45 crc restorecon[4557]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Oct 02 10:52:45 crc restorecon[4557]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Oct 02 10:52:45 crc 
restorecon[4557]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc 
restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc 
restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Oct 02 10:52:45 
crc restorecon[4557]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Oct 02 
10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 02 10:52:45 crc restorecon[4557]: 
/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Oct 02 10:52:45 crc restorecon[4557]: 
/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 02 10:52:45 crc restorecon[4557]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Oct 02 10:52:46 crc restorecon[4557]:
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 
10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Oct 02 10:52:46 crc 
restorecon[4557]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Oct 02 10:52:46 crc restorecon[4557]: 
/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Oct 02 10:52:46 crc restorecon[4557]: 
/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Oct 02 10:52:46 crc restorecon[4557]: 
/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c37,c572 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Oct 02 10:52:46 crc restorecon[4557]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 
10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]:
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c133,c223 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 02 10:52:46 crc restorecon[4557]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c682,c947 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0 Oct 02 10:52:46 crc restorecon[4557]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0 Oct 02 10:52:46 crc restorecon[4557]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0 Oct 02 10:52:47 crc kubenswrapper[4783]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Oct 02 10:52:47 crc kubenswrapper[4783]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version. Oct 02 10:52:47 crc kubenswrapper[4783]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Oct 02 10:52:47 crc kubenswrapper[4783]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. 
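The deprecation warnings above all point the same way: these kubelet flags are meant to move into the KubeletConfiguration file passed via --config. As a minimal illustrative sketch only, assuming the upstream k8s.io/kubelet/config/v1beta1 and sigs.k8s.io/yaml modules and using placeholder values that are not taken from this cluster, the equivalent config file could be rendered from the Go types like this:

    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
        kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1"
        "sigs.k8s.io/yaml"
    )

    func main() {
        // Sketch of a KubeletConfiguration covering the deprecated flags
        // warned about above. All values are illustrative placeholders.
        cfg := kubeletconfigv1beta1.KubeletConfiguration{}
        cfg.APIVersion = "kubelet.config.k8s.io/v1beta1"
        cfg.Kind = "KubeletConfiguration"
        // config-file equivalent of --container-runtime-endpoint
        cfg.ContainerRuntimeEndpoint = "unix:///var/run/crio/crio.sock"
        // config-file equivalent of --volume-plugin-dir
        cfg.VolumePluginDir = "/etc/kubernetes/kubelet-plugins/volume/exec"
        // config-file equivalent of --system-reserved
        cfg.SystemReserved = map[string]string{"cpu": "500m", "memory": "1Gi"}
        // config-file equivalent of --register-with-taints
        cfg.RegisterWithTaints = []corev1.Taint{{
            Key:    "node-role.kubernetes.io/master",
            Effect: corev1.TaintEffectNoSchedule,
        }}
        out, err := yaml.Marshal(&cfg)
        if err != nil {
            panic(err)
        }
        // Prints YAML suitable for the file named by the kubelet's --config flag.
        fmt.Print(string(out))
    }

The resulting YAML would then be referenced by --config, per the URL cited in the warnings above; --minimum-container-ttl-duration has no direct config field and is superseded by the evictionHard/evictionSoft settings.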
Oct 02 10:52:47 crc kubenswrapper[4783]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Oct 02 10:52:47 crc kubenswrapper[4783]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.241281 4783 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246506 4783 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246537 4783 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246546 4783 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246555 4783 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246563 4783 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246572 4783 feature_gate.go:330] unrecognized feature gate: NewOLM
Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246581 4783 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246589 4783 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246597 4783 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246605 4783 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246613 4783 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246622 4783 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246629 4783 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246637 4783 feature_gate.go:330] unrecognized feature gate: PinnedImages
Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246645 4783 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246653 4783 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246661 4783 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246668 4783 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246676 4783 feature_gate.go:330] unrecognized feature gate: SignatureStores
Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246684 4783 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246709 4783 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246720 4783 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246728 4783 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246736 4783 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246744 4783 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246752 4783 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246763 4783 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246771 4783 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246780 4783 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246787 4783 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246799 4783 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246810 4783 feature_gate.go:330] unrecognized feature gate: OVNObservability
Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246820 4783 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246829 4783 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246837 4783 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246845 4783 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246854 4783 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246862 4783 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246870 4783 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246877 4783 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246885 4783 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246893 4783 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246901 4783 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246909 4783 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246918 4783 feature_gate.go:330] unrecognized feature gate: Example
Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246926 4783
feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246933 4783 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246941 4783 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246949 4783 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246957 4783 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246964 4783 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246972 4783 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246979 4783 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246987 4783 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.246995 4783 feature_gate.go:330] unrecognized feature gate: InsightsConfig Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.247002 4783 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.247011 4783 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.247018 4783 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.247026 4783 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.247034 4783 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.247042 4783 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.247050 4783 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.247058 4783 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.247068 4783 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.247076 4783 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.247084 4783 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.247094 4783 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
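The wall of feature_gate.go:330 warnings above is expected on this platform: names such as GatewayAPI, NewOLM, or InsightsConfig are cluster-level OpenShift feature gates, while the kubelet only registers upstream Kubernetes gates, so unrecognized names are logged and skipped rather than treated as errors; recognized gates that are already GA or deprecated (ValidatingAdmissionPolicy, CloudDualStackNodeIPs, KMSv1) instead produce the "Setting ... feature gate" notices. A deliberately simplified sketch of that warn-and-ignore parse (the real implementation is k8s.io/component-base/featuregate, which also tracks per-gate maturity):

```go
// Toy model of the feature-gate handling seen above: unknown names warn
// and are ignored; known GA/deprecated gates warn that the override
// will stop working. A simplification, not the real featuregate code.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// Maturity of the gates this binary knows about (tiny excerpt; the
// real kubelet registers hundreds).
var known = map[string]string{
	"CloudDualStackNodeIPs":     "GA",
	"ValidatingAdmissionPolicy": "GA",
	"KMSv1":                     "Deprecated",
	"NodeSwap":                  "Beta",
}

func apply(spec string) map[string]bool {
	gates := map[string]bool{}
	for _, kv := range strings.Split(spec, ",") {
		name, val, _ := strings.Cut(kv, "=")
		enabled, err := strconv.ParseBool(val)
		if err != nil {
			continue
		}
		maturity, ok := known[name]
		if !ok {
			fmt.Printf("W] unrecognized feature gate: %s\n", name) // warn, don't fail
			continue
		}
		switch maturity {
		case "GA":
			fmt.Printf("W] Setting GA feature gate %s=%v. It will be removed in a future release.\n", name, enabled)
		case "Deprecated":
			fmt.Printf("W] Setting deprecated feature gate %s=%v. It will be removed in a future release.\n", name, enabled)
		}
		gates[name] = enabled
	}
	return gates
}

func main() {
	fmt.Println(apply("CloudDualStackNodeIPs=true,GatewayAPI=true,KMSv1=true"))
}
```

Run against the gate list in this log, every OpenShift-specific name takes the unrecognized branch, which is exactly the warning flood above.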
Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.247103 4783 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.247111 4783 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.247119 4783 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.247128 4783 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.248506 4783 flags.go:64] FLAG: --address="0.0.0.0" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.248528 4783 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.248544 4783 flags.go:64] FLAG: --anonymous-auth="true" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.248556 4783 flags.go:64] FLAG: --application-metrics-count-limit="100" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.248567 4783 flags.go:64] FLAG: --authentication-token-webhook="false" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.248577 4783 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.248589 4783 flags.go:64] FLAG: --authorization-mode="AlwaysAllow" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.248601 4783 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.248613 4783 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.248622 4783 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.248632 4783 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.248641 4783 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.248650 4783 flags.go:64] FLAG: --cgroup-driver="cgroupfs" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.248659 4783 flags.go:64] FLAG: --cgroup-root="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.248668 4783 flags.go:64] FLAG: --cgroups-per-qos="true" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.248677 4783 flags.go:64] FLAG: --client-ca-file="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.248686 4783 flags.go:64] FLAG: --cloud-config="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.248696 4783 flags.go:64] FLAG: --cloud-provider="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.248704 4783 flags.go:64] FLAG: --cluster-dns="[]" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.248715 4783 flags.go:64] FLAG: --cluster-domain="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.248724 4783 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.248734 4783 flags.go:64] FLAG: --config-dir="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.248743 4783 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.248753 4783 flags.go:64] FLAG: --container-log-max-files="5" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.248765 4783 flags.go:64] FLAG: --container-log-max-size="10Mi" Oct 
02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.248774 4783 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.248783 4783 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.248792 4783 flags.go:64] FLAG: --containerd-namespace="k8s.io" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.248801 4783 flags.go:64] FLAG: --contention-profiling="false" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.248810 4783 flags.go:64] FLAG: --cpu-cfs-quota="true" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.248819 4783 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.248829 4783 flags.go:64] FLAG: --cpu-manager-policy="none" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.248838 4783 flags.go:64] FLAG: --cpu-manager-policy-options="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.248848 4783 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.248859 4783 flags.go:64] FLAG: --enable-controller-attach-detach="true" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.248868 4783 flags.go:64] FLAG: --enable-debugging-handlers="true" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.248877 4783 flags.go:64] FLAG: --enable-load-reader="false" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.248886 4783 flags.go:64] FLAG: --enable-server="true" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.248895 4783 flags.go:64] FLAG: --enforce-node-allocatable="[pods]" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.248907 4783 flags.go:64] FLAG: --event-burst="100" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.248916 4783 flags.go:64] FLAG: --event-qps="50" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.248925 4783 flags.go:64] FLAG: --event-storage-age-limit="default=0" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.248934 4783 flags.go:64] FLAG: --event-storage-event-limit="default=0" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.248945 4783 flags.go:64] FLAG: --eviction-hard="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.248957 4783 flags.go:64] FLAG: --eviction-max-pod-grace-period="0" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.248966 4783 flags.go:64] FLAG: --eviction-minimum-reclaim="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.248975 4783 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.248984 4783 flags.go:64] FLAG: --eviction-soft="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.248993 4783 flags.go:64] FLAG: --eviction-soft-grace-period="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249003 4783 flags.go:64] FLAG: --exit-on-lock-contention="false" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249013 4783 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249022 4783 flags.go:64] FLAG: --experimental-mounter-path="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249031 4783 flags.go:64] FLAG: --fail-cgroupv1="false" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249040 4783 flags.go:64] FLAG: --fail-swap-on="true" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249048 4783 flags.go:64] FLAG: 
--feature-gates="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249059 4783 flags.go:64] FLAG: --file-check-frequency="20s" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249068 4783 flags.go:64] FLAG: --global-housekeeping-interval="1m0s" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249078 4783 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249087 4783 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249096 4783 flags.go:64] FLAG: --healthz-port="10248" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249104 4783 flags.go:64] FLAG: --help="false" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249114 4783 flags.go:64] FLAG: --hostname-override="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249122 4783 flags.go:64] FLAG: --housekeeping-interval="10s" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249131 4783 flags.go:64] FLAG: --http-check-frequency="20s" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249140 4783 flags.go:64] FLAG: --image-credential-provider-bin-dir="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249148 4783 flags.go:64] FLAG: --image-credential-provider-config="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249158 4783 flags.go:64] FLAG: --image-gc-high-threshold="85" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249166 4783 flags.go:64] FLAG: --image-gc-low-threshold="80" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249175 4783 flags.go:64] FLAG: --image-service-endpoint="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249183 4783 flags.go:64] FLAG: --kernel-memcg-notification="false" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249192 4783 flags.go:64] FLAG: --kube-api-burst="100" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249202 4783 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249211 4783 flags.go:64] FLAG: --kube-api-qps="50" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249219 4783 flags.go:64] FLAG: --kube-reserved="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249228 4783 flags.go:64] FLAG: --kube-reserved-cgroup="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249281 4783 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249291 4783 flags.go:64] FLAG: --kubelet-cgroups="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249300 4783 flags.go:64] FLAG: --local-storage-capacity-isolation="true" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249309 4783 flags.go:64] FLAG: --lock-file="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249318 4783 flags.go:64] FLAG: --log-cadvisor-usage="false" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249328 4783 flags.go:64] FLAG: --log-flush-frequency="5s" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249365 4783 flags.go:64] FLAG: --log-json-info-buffer-size="0" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249379 4783 flags.go:64] FLAG: --log-json-split-stream="false" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249387 4783 flags.go:64] FLAG: --log-text-info-buffer-size="0" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249396 4783 flags.go:64] FLAG: --log-text-split-stream="false" Oct 02 
10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249405 4783 flags.go:64] FLAG: --logging-format="text" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249459 4783 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249470 4783 flags.go:64] FLAG: --make-iptables-util-chains="true" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249479 4783 flags.go:64] FLAG: --manifest-url="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249488 4783 flags.go:64] FLAG: --manifest-url-header="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249500 4783 flags.go:64] FLAG: --max-housekeeping-interval="15s" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249508 4783 flags.go:64] FLAG: --max-open-files="1000000" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249550 4783 flags.go:64] FLAG: --max-pods="110" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249560 4783 flags.go:64] FLAG: --maximum-dead-containers="-1" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249570 4783 flags.go:64] FLAG: --maximum-dead-containers-per-container="1" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249579 4783 flags.go:64] FLAG: --memory-manager-policy="None" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249587 4783 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249597 4783 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249637 4783 flags.go:64] FLAG: --node-ip="192.168.126.11" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249647 4783 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249666 4783 flags.go:64] FLAG: --node-status-max-images="50" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249675 4783 flags.go:64] FLAG: --node-status-update-frequency="10s" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249684 4783 flags.go:64] FLAG: --oom-score-adj="-999" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249751 4783 flags.go:64] FLAG: --pod-cidr="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249762 4783 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249777 4783 flags.go:64] FLAG: --pod-manifest-path="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249786 4783 flags.go:64] FLAG: --pod-max-pids="-1" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249830 4783 flags.go:64] FLAG: --pods-per-core="0" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249839 4783 flags.go:64] FLAG: --port="10250" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249848 4783 flags.go:64] FLAG: --protect-kernel-defaults="false" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249857 4783 flags.go:64] FLAG: --provider-id="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249866 4783 flags.go:64] FLAG: --qos-reserved="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249875 4783 flags.go:64] FLAG: --read-only-port="10255" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249914 4783 flags.go:64] FLAG: --register-node="true" Oct 02 10:52:47 crc 
kubenswrapper[4783]: I1002 10:52:47.249923 4783 flags.go:64] FLAG: --register-schedulable="true" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249933 4783 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249948 4783 flags.go:64] FLAG: --registry-burst="10" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249958 4783 flags.go:64] FLAG: --registry-qps="5" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.249997 4783 flags.go:64] FLAG: --reserved-cpus="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.250006 4783 flags.go:64] FLAG: --reserved-memory="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.250017 4783 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.250026 4783 flags.go:64] FLAG: --root-dir="/var/lib/kubelet" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.250035 4783 flags.go:64] FLAG: --rotate-certificates="false" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.250045 4783 flags.go:64] FLAG: --rotate-server-certificates="false" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.250054 4783 flags.go:64] FLAG: --runonce="false" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.250093 4783 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.250102 4783 flags.go:64] FLAG: --runtime-request-timeout="2m0s" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.250113 4783 flags.go:64] FLAG: --seccomp-default="false" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.250122 4783 flags.go:64] FLAG: --serialize-image-pulls="true" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.250131 4783 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.250140 4783 flags.go:64] FLAG: --storage-driver-db="cadvisor" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.250180 4783 flags.go:64] FLAG: --storage-driver-host="localhost:8086" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.250189 4783 flags.go:64] FLAG: --storage-driver-password="root" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.250198 4783 flags.go:64] FLAG: --storage-driver-secure="false" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.250208 4783 flags.go:64] FLAG: --storage-driver-table="stats" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.250216 4783 flags.go:64] FLAG: --storage-driver-user="root" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.250225 4783 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.250264 4783 flags.go:64] FLAG: --sync-frequency="1m0s" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.250275 4783 flags.go:64] FLAG: --system-cgroups="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.250284 4783 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.250298 4783 flags.go:64] FLAG: --system-reserved-cgroup="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.250307 4783 flags.go:64] FLAG: --tls-cert-file="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.250316 4783 flags.go:64] FLAG: --tls-cipher-suites="[]" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.250351 4783 flags.go:64] FLAG: --tls-min-version="" Oct 02 10:52:47 
crc kubenswrapper[4783]: I1002 10:52:47.250361 4783 flags.go:64] FLAG: --tls-private-key-file=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.250371 4783 flags.go:64] FLAG: --topology-manager-policy="none"
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.250380 4783 flags.go:64] FLAG: --topology-manager-policy-options=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.250389 4783 flags.go:64] FLAG: --topology-manager-scope="container"
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.250399 4783 flags.go:64] FLAG: --v="2"
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.250444 4783 flags.go:64] FLAG: --version="false"
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.250456 4783 flags.go:64] FLAG: --vmodule=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.250468 4783 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec"
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.250478 4783 flags.go:64] FLAG: --volume-stats-agg-period="1m0s"
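The FLAG: lines above are the kubelet's startup dump of every command-line flag's effective value (flags.go:64), visible at this node's --v="2" verbosity; it is a convenient way to diff effective configuration between nodes. A minimal standard-library analogue of the pattern (the kubelet itself does this with spf13/pflag, but the VisitAll walk and the FLAG: --name="value" format are the same idea):

```go
// Minimal analogue of the kubelet's FLAG dump using only the standard
// flag package.
package main

import (
	"flag"
	"fmt"
)

func main() {
	flag.Int("v", 2, "log verbosity")
	flag.String("node-ip", "192.168.126.11", "node IP address")
	flag.Parse()

	// VisitAll walks every defined flag (set or not) in lexical order.
	flag.VisitAll(func(f *flag.Flag) {
		fmt.Printf("FLAG: --%s=%q\n", f.Name, f.Value.String())
	})
}
```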
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.253776 4783 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.268069 4783 server.go:491] "Kubelet version" kubeletVersion="v1.31.5"
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.268124 4783 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
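The feature_gate.go:386 entry above prints the gate overrides the kubelet actually accepted (the OpenShift-only names from the warnings are simply absent), and the following server.go lines record the kubelet version (v1.31.5) and the Go runtime knobs GOGC/GOMAXPROCS/GOTRACEBACK, all empty here, meaning runtime defaults. A small sketch of how those empty settings resolve inside any Go process, with the defaults as I understand them from the Go runtime docs (GOGC defaults to 100, GOTRACEBACK to "single"):

```go
// What empty GOGC/GOMAXPROCS/GOTRACEBACK mean in practice: the runtime
// falls back to its built-in defaults, which this prints.
package main

import (
	"fmt"
	"os"
	"runtime"
)

func main() {
	fmt.Println("go version:", runtime.Version())     // Go toolchain that built this binary
	fmt.Println("GOMAXPROCS:", runtime.GOMAXPROCS(0)) // arg 0 queries without changing; defaults to NumCPU
	fmt.Println("NumCPU:", runtime.NumCPU())          // 8 on this node per the Machine line below
	fmt.Println("GOGC env:", os.Getenv("GOGC"))       // empty -> effective default 100
	fmt.Println("GOTRACEBACK env:", os.Getenv("GOTRACEBACK")) // empty -> default "single"
}
```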
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.271359 4783 server.go:940] "Client rotation is on, will bootstrap in background"
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.277489 4783 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.277986 4783 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.280547 4783 server.go:997] "Starting client certificate rotation"
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.280599 4783 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.281801 4783 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-12-31 13:58:24.060299679 +0000 UTC
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.281936 4783 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 2163h5m36.778368712s for next certificate rotation
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.322626 4783 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.324833 4783 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.349733 4783 log.go:25] "Validated CRI v1 runtime API"
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.395569 4783 log.go:25] "Validated CRI v1 image API"
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.399878 4783 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.410332 4783 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-10-02-10-45-57-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.410386 4783 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} 
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.410386 4783 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:41 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:42 fsType:tmpfs blockSize:0}]
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.430680 4783 manager.go:217] Machine: {Timestamp:2025-10-02 10:52:47.428872351 +0000 UTC m=+0.745066692 CPUVendorID:AuthenticAMD NumCores:8 NumPhysicalCores:1 NumSockets:8 CpuFrequency:2800000 MemoryCapacity:25199480832 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:b9763463-2b4a-4924-bf4e-8df5af678b9c BootID:bc187a47-fc71-4069-a609-1fd638044aa7 Filesystems:[{Device:/run/user/1000 DeviceMajor:0 DeviceMinor:41 Capacity:2519945216 Type:vfs Inodes:615221 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:42 Capacity:1073741824 Type:vfs Inodes:3076108 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:12599738368 Type:vfs Inodes:3076108 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:5039898624 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:12599742464 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:429496729600 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:1a:f0:2c Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:1a:f0:2c Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:00:f6:bf Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:e6:13:87 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:53:1b:15 Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:65:a5:98 Speed:-1 Mtu:1496} {Name:eth10 MacAddress:b2:ba:7c:6e:fe:94 Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:fe:01:5b:e8:37:51 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:25199480832 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.430992 4783 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.431230 4783 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.432708 4783 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.432990 4783 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.433058 4783 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.433312 4783 topology_manager.go:138] "Creating topology manager with none policy"
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.433331 4783 container_manager_linux.go:303] "Creating device plugin manager"
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.434063 4783 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.434108 4783 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.435060 4783 state_mem.go:36] "Initialized new in-memory state store"
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.435757 4783 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.439169 4783 kubelet.go:418] "Attempting to sync node with API server"
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.439199 4783 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.439248 4783 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.439266 4783 kubelet.go:324] "Adding apiserver pod source"
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.439278 4783 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.445368 4783 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.446530 4783 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.449644 4783 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.129.56.217:6443: connect: connection refused
Oct 02 10:52:47 crc kubenswrapper[4783]: E1002 10:52:47.449775 4783 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.129.56.217:6443: connect: connection refused" logger="UnhandledError"
Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.449637 4783 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.129.56.217:6443: connect: connection refused
Oct 02 10:52:47 crc kubenswrapper[4783]: E1002 10:52:47.449854 4783 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.129.56.217:6443: connect: connection refused" logger="UnhandledError"
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.449975 4783 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.451543 4783 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.451571 4783 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.451581 4783 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.451602 4783 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.451619 4783 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.451675 4783 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.451689 4783 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.451703 4783 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.451712 4783 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.451722 4783 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.451735 4783 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.451746 4783 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.452797 4783 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.453549 4783 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.129.56.217:6443: connect: connection refused
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.453768 4783 server.go:1280] "Started kubelet"
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.453904 4783 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.454854 4783 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.455688 4783 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.455730 4783 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.455790 4783 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-19 17:12:28.060782341 +0000 UTC
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.455839 4783 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 1878h19m40.604945898s for next certificate rotation
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.455899 4783 volume_manager.go:287] "The desired_state_of_world populator starts"
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.455909 4783 volume_manager.go:289] "Starting Kubelet Volume Manager"
Oct 02 10:52:47 crc systemd[1]: Started Kubernetes Kubelet.
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.461318 4783 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.463086 4783 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Oct 02 10:52:47 crc kubenswrapper[4783]: E1002 10:52:47.460697 4783 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.465436 4783 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.129.56.217:6443: connect: connection refused
Oct 02 10:52:47 crc kubenswrapper[4783]: E1002 10:52:47.465666 4783 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.129.56.217:6443: connect: connection refused" logger="UnhandledError"
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.466673 4783 server.go:460] "Adding debug handlers to kubelet server"
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.468171 4783 factory.go:55] Registering systemd factory
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.468301 4783 factory.go:221] Registration of the systemd container factory successfully
Oct 02 10:52:47 crc kubenswrapper[4783]: E1002 10:52:47.470112 4783 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.217:6443: connect: connection refused" interval="200ms"
Oct 02 10:52:47 crc kubenswrapper[4783]: E1002 10:52:47.469582 4783 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.129.56.217:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.186aa721a90f8352 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-10-02 10:52:47.453725522 +0000 UTC m=+0.769919783,LastTimestamp:2025-10-02 10:52:47.453725522 +0000 UTC m=+0.769919783,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.475751 4783 factory.go:153] Registering CRI-O factory
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.475791 4783 factory.go:221] Registration of the crio container factory successfully
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.475896 4783 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.475935 4783 factory.go:103] Registering Raw factory
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.475957 4783 manager.go:1196] Started watching for new ooms in manager
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.476880 4783 manager.go:319] Starting recovery of all containers
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.480438 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.480803 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.480885 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.480958 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.481034 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.481094 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.481163 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.481242 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.481308 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.481365 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.481438 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.481535 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.481619 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.481691 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.481750 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.481827 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.481894 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.481996 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.482056 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.482113 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.485019 4783 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount"
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.485094 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.485120 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.485137 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.485176 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.485196 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.485213 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.485253 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.485276 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.485293 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.485309 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.485326 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.485346 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.485362 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.485380 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.485394 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.485431 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.485447 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.485462 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.485481 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.485498 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.485513 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.485549 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.485567 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.485598 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.485615 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.485633 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.485646 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.485662 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.485678 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.485695 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.485710 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.485726 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.485756 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.485769 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.485781 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.485793 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.485805 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.485816 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.485828 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.485843 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.485854 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.485874 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.485885 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.485897 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.485907 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.485919 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.485934 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.485949 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.485962 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.485976 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.485991 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.486006 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.486020 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.486035 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.486053 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.486066 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.486077 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.486088 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.486099 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.486111 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.486122 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.486132 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.486144 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.486154 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.486166 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.486177 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.486190 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.486200 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.486281 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.486292 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.486303 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.486316 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.486493 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.486508 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.486517 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.486528 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.486539 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.486551 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.486583 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.486596 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.486605 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.486615 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.486624 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.486651 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.486671 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.486684 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.486695 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.486707 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.486747 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.486762 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.486778 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.486794 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.486843 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.486862 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.486876 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.486963 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.486985 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.486997 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.487009 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.487038 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.487052 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.487064 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.487077 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.487090 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.487125 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext=""
Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.487138 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext=""
podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.487149 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.487167 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.487181 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.487221 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.487237 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.487251 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.487265 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.487306 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.487320 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.487335 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.487348 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.487376 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.487386 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.487397 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.487432 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.487445 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.487455 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.487468 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.487479 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.487489 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.487529 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.487540 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.487554 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.487564 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.487575 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.487611 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.487624 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.487634 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.487645 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.487658 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.487684 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.487696 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.487707 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.487812 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.487827 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.487841 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.487853 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.487882 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.487898 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.487914 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.487930 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.488010 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.488049 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.488069 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.488084 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.488100 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.488137 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.488151 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.488165 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.488181 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.488218 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.488234 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.488249 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.488264 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.488299 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.488317 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.488334 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.488369 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.488384 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.488399 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.488440 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.488458 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.488477 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.488521 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.488538 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.488552 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" 
volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.488565 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.488615 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.488635 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.488662 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.488703 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.488718 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.488735 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.488773 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.488788 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.488800 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.488812 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" 
seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.488826 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.488856 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.488868 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.488884 4783 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext="" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.488896 4783 reconstruct.go:97] "Volume reconstruction finished" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.488905 4783 reconciler.go:26] "Reconciler: start to sync state" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.494988 4783 manager.go:324] Recovery completed Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.513661 4783 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.515183 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.515222 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.515233 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.516892 4783 cpu_manager.go:225] "Starting CPU manager" policy="none" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.516915 4783 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.516940 4783 state_mem.go:36] "Initialized new in-memory state store" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.540934 4783 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.543321 4783 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.543401 4783 status_manager.go:217] "Starting to sync pod status with apiserver" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.543565 4783 kubelet.go:2335] "Starting kubelet main sync loop" Oct 02 10:52:47 crc kubenswrapper[4783]: E1002 10:52:47.543758 4783 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Oct 02 10:52:47 crc kubenswrapper[4783]: W1002 10:52:47.544568 4783 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.129.56.217:6443: connect: connection refused Oct 02 10:52:47 crc kubenswrapper[4783]: E1002 10:52:47.544625 4783 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.129.56.217:6443: connect: connection refused" logger="UnhandledError" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.564673 4783 policy_none.go:49] "None policy: Start" Oct 02 10:52:47 crc kubenswrapper[4783]: E1002 10:52:47.565184 4783 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.565652 4783 memory_manager.go:170] "Starting memorymanager" policy="None" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.565676 4783 state_mem.go:35] "Initializing new in-memory state store" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.642559 4783 manager.go:334] "Starting Device Plugin manager" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.642640 4783 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.642657 4783 server.go:79] "Starting device plugin registration server" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.643129 4783 eviction_manager.go:189] "Eviction manager: starting control loop" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.643146 4783 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.644005 4783 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.644076 4783 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.644082 4783 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.644155 4783 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc"] Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.644206 4783 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.646072 4783 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.646105 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.646119 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.646241 4783 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.646398 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.646466 4783 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.647205 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.647251 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.647261 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.647358 4783 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.647573 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.647613 4783 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.648548 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.648558 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.648578 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.648579 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.648598 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.648620 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.648632 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.648637 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.648589 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 
10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.648955 4783 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.649166 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.649280 4783 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.650035 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.650068 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.650078 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.650186 4783 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.650385 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.650476 4783 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.650494 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.650515 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.650526 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.651069 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.651089 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.651100 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.651245 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.651266 4783 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.652169 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.652192 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.652201 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.653661 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.653681 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.653690 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:52:47 crc kubenswrapper[4783]: E1002 10:52:47.660928 4783 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Oct 02 10:52:47 crc kubenswrapper[4783]: E1002 10:52:47.670797 4783 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.217:6443: connect: connection refused" interval="400ms" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.692354 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.692389 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.692421 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.692440 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.692464 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" 
(UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.692498 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.692579 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.692625 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.692650 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.692667 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.692681 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.692699 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.692737 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.692780 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod 
\"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.692816 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.743355 4783 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.744458 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.744593 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.744685 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.744789 4783 kubelet_node_status.go:76] "Attempting to register node" node="crc" Oct 02 10:52:47 crc kubenswrapper[4783]: E1002 10:52:47.745403 4783 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.129.56.217:6443: connect: connection refused" node="crc" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.793905 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.793948 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.793973 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.793992 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.794010 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.794029 4783 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.794045 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.794063 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.794082 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.794095 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.794136 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.794137 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.794149 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.794170 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.794099 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.794102 4783 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.794189 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.794158 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.794236 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.794194 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.794209 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.794288 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.794205 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.794325 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.794367 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 02 10:52:47 crc kubenswrapper[4783]: 
I1002 10:52:47.794370 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.794449 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.794489 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.794502 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.794457 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.946486 4783 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.950073 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.950124 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.950142 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.950174 4783 kubelet_node_status.go:76] "Attempting to register node" node="crc" Oct 02 10:52:47 crc kubenswrapper[4783]: E1002 10:52:47.953921 4783 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.129.56.217:6443: connect: connection refused" node="crc" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.977529 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Oct 02 10:52:47 crc kubenswrapper[4783]: I1002 10:52:47.985786 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Oct 02 10:52:48 crc kubenswrapper[4783]: I1002 10:52:48.009810 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 02 10:52:48 crc kubenswrapper[4783]: I1002 10:52:48.032621 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 02 10:52:48 crc kubenswrapper[4783]: I1002 10:52:48.043255 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Oct 02 10:52:48 crc kubenswrapper[4783]: W1002 10:52:48.059923 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-9e41471dd8ac498142263f7d49c1f649070c62285e37da2c94f76d7606569c37 WatchSource:0}: Error finding container 9e41471dd8ac498142263f7d49c1f649070c62285e37da2c94f76d7606569c37: Status 404 returned error can't find the container with id 9e41471dd8ac498142263f7d49c1f649070c62285e37da2c94f76d7606569c37 Oct 02 10:52:48 crc kubenswrapper[4783]: W1002 10:52:48.060923 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-329e5d9de5935660315aebc030207c7670d82551a7e7e5b4412868b9d3999089 WatchSource:0}: Error finding container 329e5d9de5935660315aebc030207c7670d82551a7e7e5b4412868b9d3999089: Status 404 returned error can't find the container with id 329e5d9de5935660315aebc030207c7670d82551a7e7e5b4412868b9d3999089 Oct 02 10:52:48 crc kubenswrapper[4783]: W1002 10:52:48.071961 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-7be725f3455e988b194fac149d9e545bc7a4a86895aeedeb96fb8ff90e6e7639 WatchSource:0}: Error finding container 7be725f3455e988b194fac149d9e545bc7a4a86895aeedeb96fb8ff90e6e7639: Status 404 returned error can't find the container with id 7be725f3455e988b194fac149d9e545bc7a4a86895aeedeb96fb8ff90e6e7639 Oct 02 10:52:48 crc kubenswrapper[4783]: E1002 10:52:48.072014 4783 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.217:6443: connect: connection refused" interval="800ms" Oct 02 10:52:48 crc kubenswrapper[4783]: W1002 10:52:48.080842 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-a32effe0c4064b1504a1819ddddbca2c2a6765df6c7fc44abd0f233f4cf5dd96 WatchSource:0}: Error finding container a32effe0c4064b1504a1819ddddbca2c2a6765df6c7fc44abd0f233f4cf5dd96: Status 404 returned error can't find the container with id a32effe0c4064b1504a1819ddddbca2c2a6765df6c7fc44abd0f233f4cf5dd96 Oct 02 10:52:48 crc kubenswrapper[4783]: W1002 10:52:48.084313 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-0067c53a6962ed5e0bd767f3b8448d5e4959f4b30402052baf55f32668981c5b WatchSource:0}: Error finding container 0067c53a6962ed5e0bd767f3b8448d5e4959f4b30402052baf55f32668981c5b: Status 404 returned error can't find the container with id 0067c53a6962ed5e0bd767f3b8448d5e4959f4b30402052baf55f32668981c5b Oct 02 10:52:48 crc kubenswrapper[4783]: I1002 10:52:48.354582 4783 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 02 10:52:48 crc kubenswrapper[4783]: I1002 10:52:48.356731 4783 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:52:48 crc kubenswrapper[4783]: I1002 10:52:48.356781 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:52:48 crc kubenswrapper[4783]: I1002 10:52:48.356798 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:52:48 crc kubenswrapper[4783]: I1002 10:52:48.356833 4783 kubelet_node_status.go:76] "Attempting to register node" node="crc" Oct 02 10:52:48 crc kubenswrapper[4783]: E1002 10:52:48.357462 4783 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.129.56.217:6443: connect: connection refused" node="crc" Oct 02 10:52:48 crc kubenswrapper[4783]: I1002 10:52:48.454592 4783 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.129.56.217:6443: connect: connection refused Oct 02 10:52:48 crc kubenswrapper[4783]: I1002 10:52:48.547232 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"9e41471dd8ac498142263f7d49c1f649070c62285e37da2c94f76d7606569c37"} Oct 02 10:52:48 crc kubenswrapper[4783]: I1002 10:52:48.548500 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"0067c53a6962ed5e0bd767f3b8448d5e4959f4b30402052baf55f32668981c5b"} Oct 02 10:52:48 crc kubenswrapper[4783]: I1002 10:52:48.549431 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"a32effe0c4064b1504a1819ddddbca2c2a6765df6c7fc44abd0f233f4cf5dd96"} Oct 02 10:52:48 crc kubenswrapper[4783]: W1002 10:52:48.549921 4783 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.129.56.217:6443: connect: connection refused Oct 02 10:52:48 crc kubenswrapper[4783]: E1002 10:52:48.549998 4783 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.129.56.217:6443: connect: connection refused" logger="UnhandledError" Oct 02 10:52:48 crc kubenswrapper[4783]: I1002 10:52:48.550464 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"7be725f3455e988b194fac149d9e545bc7a4a86895aeedeb96fb8ff90e6e7639"} Oct 02 10:52:48 crc kubenswrapper[4783]: I1002 10:52:48.551457 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"329e5d9de5935660315aebc030207c7670d82551a7e7e5b4412868b9d3999089"} Oct 02 10:52:48 crc kubenswrapper[4783]: W1002 10:52:48.578599 4783 
reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.129.56.217:6443: connect: connection refused Oct 02 10:52:48 crc kubenswrapper[4783]: E1002 10:52:48.578722 4783 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.129.56.217:6443: connect: connection refused" logger="UnhandledError" Oct 02 10:52:48 crc kubenswrapper[4783]: E1002 10:52:48.788917 4783 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.129.56.217:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.186aa721a90f8352 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-10-02 10:52:47.453725522 +0000 UTC m=+0.769919783,LastTimestamp:2025-10-02 10:52:47.453725522 +0000 UTC m=+0.769919783,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Oct 02 10:52:48 crc kubenswrapper[4783]: W1002 10:52:48.821358 4783 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.129.56.217:6443: connect: connection refused Oct 02 10:52:48 crc kubenswrapper[4783]: E1002 10:52:48.821457 4783 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.129.56.217:6443: connect: connection refused" logger="UnhandledError" Oct 02 10:52:48 crc kubenswrapper[4783]: E1002 10:52:48.873647 4783 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.217:6443: connect: connection refused" interval="1.6s" Oct 02 10:52:48 crc kubenswrapper[4783]: W1002 10:52:48.904618 4783 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.129.56.217:6443: connect: connection refused Oct 02 10:52:48 crc kubenswrapper[4783]: E1002 10:52:48.904721 4783 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.129.56.217:6443: connect: connection refused" logger="UnhandledError" Oct 02 10:52:49 crc kubenswrapper[4783]: I1002 10:52:49.158138 4783 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 02 10:52:49 crc kubenswrapper[4783]: I1002 10:52:49.159660 
4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:52:49 crc kubenswrapper[4783]: I1002 10:52:49.159712 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:52:49 crc kubenswrapper[4783]: I1002 10:52:49.159729 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:52:49 crc kubenswrapper[4783]: I1002 10:52:49.159761 4783 kubelet_node_status.go:76] "Attempting to register node" node="crc" Oct 02 10:52:49 crc kubenswrapper[4783]: E1002 10:52:49.160242 4783 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.129.56.217:6443: connect: connection refused" node="crc" Oct 02 10:52:49 crc kubenswrapper[4783]: I1002 10:52:49.454932 4783 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.129.56.217:6443: connect: connection refused Oct 02 10:52:49 crc kubenswrapper[4783]: I1002 10:52:49.557974 4783 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008" exitCode=0 Oct 02 10:52:49 crc kubenswrapper[4783]: I1002 10:52:49.558117 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008"} Oct 02 10:52:49 crc kubenswrapper[4783]: I1002 10:52:49.558140 4783 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 02 10:52:49 crc kubenswrapper[4783]: I1002 10:52:49.560088 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:52:49 crc kubenswrapper[4783]: I1002 10:52:49.560149 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:52:49 crc kubenswrapper[4783]: I1002 10:52:49.560172 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:52:49 crc kubenswrapper[4783]: I1002 10:52:49.560618 4783 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b" exitCode=0 Oct 02 10:52:49 crc kubenswrapper[4783]: I1002 10:52:49.560717 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b"} Oct 02 10:52:49 crc kubenswrapper[4783]: I1002 10:52:49.560888 4783 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 02 10:52:49 crc kubenswrapper[4783]: I1002 10:52:49.562867 4783 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 02 10:52:49 crc kubenswrapper[4783]: I1002 10:52:49.563121 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:52:49 crc kubenswrapper[4783]: I1002 10:52:49.563185 4783 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:52:49 crc kubenswrapper[4783]: I1002 10:52:49.563209 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:52:49 crc kubenswrapper[4783]: I1002 10:52:49.564487 4783 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="09b174921a65fa7e1f4a51913ab90adbdc791ee1d14b4f498486612f0a66e79d" exitCode=0 Oct 02 10:52:49 crc kubenswrapper[4783]: I1002 10:52:49.564590 4783 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 02 10:52:49 crc kubenswrapper[4783]: I1002 10:52:49.564610 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"09b174921a65fa7e1f4a51913ab90adbdc791ee1d14b4f498486612f0a66e79d"} Oct 02 10:52:49 crc kubenswrapper[4783]: I1002 10:52:49.565267 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:52:49 crc kubenswrapper[4783]: I1002 10:52:49.565325 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:52:49 crc kubenswrapper[4783]: I1002 10:52:49.565342 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:52:49 crc kubenswrapper[4783]: I1002 10:52:49.566579 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:52:49 crc kubenswrapper[4783]: I1002 10:52:49.566639 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:52:49 crc kubenswrapper[4783]: I1002 10:52:49.566663 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:52:49 crc kubenswrapper[4783]: I1002 10:52:49.570560 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"cf7913b5261e05e8bbfb22a81635ff274a8a28d550d004a5349a9ed84ba5c8b5"} Oct 02 10:52:49 crc kubenswrapper[4783]: I1002 10:52:49.570846 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"fcc4f647d50346cff07c267ebe645576d032b01e443eaabe4b9aa73b91966e4f"} Oct 02 10:52:49 crc kubenswrapper[4783]: I1002 10:52:49.573107 4783 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="1921ea86697ee9de43cd3e9b3b6673ad627b9997cd9f4738e8bd86428e5ef02b" exitCode=0 Oct 02 10:52:49 crc kubenswrapper[4783]: I1002 10:52:49.573158 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"1921ea86697ee9de43cd3e9b3b6673ad627b9997cd9f4738e8bd86428e5ef02b"} Oct 02 10:52:49 crc kubenswrapper[4783]: I1002 10:52:49.573267 4783 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 02 10:52:49 crc kubenswrapper[4783]: I1002 10:52:49.574907 4783 kubelet_node_status.go:724] "Recording event message 
for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:52:49 crc kubenswrapper[4783]: I1002 10:52:49.575107 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:52:49 crc kubenswrapper[4783]: I1002 10:52:49.575565 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:52:50 crc kubenswrapper[4783]: I1002 10:52:50.455094 4783 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.129.56.217:6443: connect: connection refused Oct 02 10:52:50 crc kubenswrapper[4783]: E1002 10:52:50.474953 4783 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.217:6443: connect: connection refused" interval="3.2s" Oct 02 10:52:50 crc kubenswrapper[4783]: I1002 10:52:50.577441 4783 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0" exitCode=0 Oct 02 10:52:50 crc kubenswrapper[4783]: I1002 10:52:50.577531 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0"} Oct 02 10:52:50 crc kubenswrapper[4783]: I1002 10:52:50.577550 4783 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 02 10:52:50 crc kubenswrapper[4783]: I1002 10:52:50.578461 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:52:50 crc kubenswrapper[4783]: I1002 10:52:50.578489 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:52:50 crc kubenswrapper[4783]: I1002 10:52:50.578503 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:52:50 crc kubenswrapper[4783]: I1002 10:52:50.579322 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"592d6115666eceba7b6853e7f053042c8e55df0085f67fe3193ed56d79e16cff"} Oct 02 10:52:50 crc kubenswrapper[4783]: I1002 10:52:50.579402 4783 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 02 10:52:50 crc kubenswrapper[4783]: I1002 10:52:50.580441 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:52:50 crc kubenswrapper[4783]: I1002 10:52:50.580471 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:52:50 crc kubenswrapper[4783]: I1002 10:52:50.580484 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:52:50 crc kubenswrapper[4783]: I1002 10:52:50.583483 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" 
event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"4f0e111ce373594ac0f4bf89b06fd2008eb6554e566575b88e4a4f6beaaa29d9"} Oct 02 10:52:50 crc kubenswrapper[4783]: I1002 10:52:50.583516 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"cbdd2ca18244e601967c7e10b1f8f1f02fcf33bfee712d78f7715909c95581b9"} Oct 02 10:52:50 crc kubenswrapper[4783]: I1002 10:52:50.583595 4783 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 02 10:52:50 crc kubenswrapper[4783]: I1002 10:52:50.584607 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:52:50 crc kubenswrapper[4783]: I1002 10:52:50.584643 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:52:50 crc kubenswrapper[4783]: I1002 10:52:50.584653 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:52:50 crc kubenswrapper[4783]: I1002 10:52:50.586585 4783 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 02 10:52:50 crc kubenswrapper[4783]: I1002 10:52:50.586661 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"082040f0d8c15812040aa0d45d22f327c303d526e72d35524470348c34b4e74a"} Oct 02 10:52:50 crc kubenswrapper[4783]: I1002 10:52:50.586703 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"5dc6c37e90ca8574d11db2bcc1137a63af565614eccef178d05b863c6a839b07"} Oct 02 10:52:50 crc kubenswrapper[4783]: I1002 10:52:50.586719 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"c3a7b7bb194e7cc66b1765fc3dc2f4d9c6aafa562cb9fcde19f862fda5e34282"} Oct 02 10:52:50 crc kubenswrapper[4783]: I1002 10:52:50.587792 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:52:50 crc kubenswrapper[4783]: I1002 10:52:50.587815 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:52:50 crc kubenswrapper[4783]: I1002 10:52:50.587823 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:52:50 crc kubenswrapper[4783]: I1002 10:52:50.590715 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"85188b6d7b5901c957d4ea576d5baab74589a2300f15d6210915fd78f6171348"} Oct 02 10:52:50 crc kubenswrapper[4783]: I1002 10:52:50.590746 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"6e521d7072220c202109a67fe59ebebe0db8bd877d02d9805324922eb3bf45d6"} Oct 02 10:52:50 crc kubenswrapper[4783]: I1002 10:52:50.590756 4783 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"fdb33b77ecaf413bc6e4aeb0f4358b12cd0f3fa6e01602673a518f5651e4bac6"} Oct 02 10:52:50 crc kubenswrapper[4783]: I1002 10:52:50.590767 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"d954f0dc3572c180556ccde9a4c429359575e5418fb786fc5df6987f8c326075"} Oct 02 10:52:50 crc kubenswrapper[4783]: I1002 10:52:50.760554 4783 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 02 10:52:50 crc kubenswrapper[4783]: I1002 10:52:50.761731 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:52:50 crc kubenswrapper[4783]: I1002 10:52:50.761757 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:52:50 crc kubenswrapper[4783]: I1002 10:52:50.761766 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:52:50 crc kubenswrapper[4783]: I1002 10:52:50.761786 4783 kubelet_node_status.go:76] "Attempting to register node" node="crc" Oct 02 10:52:50 crc kubenswrapper[4783]: E1002 10:52:50.762123 4783 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.129.56.217:6443: connect: connection refused" node="crc" Oct 02 10:52:51 crc kubenswrapper[4783]: W1002 10:52:51.142553 4783 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.129.56.217:6443: connect: connection refused Oct 02 10:52:51 crc kubenswrapper[4783]: E1002 10:52:51.142649 4783 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.129.56.217:6443: connect: connection refused" logger="UnhandledError" Oct 02 10:52:51 crc kubenswrapper[4783]: W1002 10:52:51.336199 4783 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.129.56.217:6443: connect: connection refused Oct 02 10:52:51 crc kubenswrapper[4783]: E1002 10:52:51.336280 4783 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.129.56.217:6443: connect: connection refused" logger="UnhandledError" Oct 02 10:52:51 crc kubenswrapper[4783]: W1002 10:52:51.348193 4783 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.129.56.217:6443: connect: connection refused Oct 02 10:52:51 crc kubenswrapper[4783]: E1002 10:52:51.348349 4783 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to 
watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.129.56.217:6443: connect: connection refused" logger="UnhandledError" Oct 02 10:52:51 crc kubenswrapper[4783]: I1002 10:52:51.454624 4783 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.129.56.217:6443: connect: connection refused Oct 02 10:52:51 crc kubenswrapper[4783]: I1002 10:52:51.595510 4783 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1" exitCode=0 Oct 02 10:52:51 crc kubenswrapper[4783]: I1002 10:52:51.595598 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1"} Oct 02 10:52:51 crc kubenswrapper[4783]: I1002 10:52:51.595612 4783 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 02 10:52:51 crc kubenswrapper[4783]: I1002 10:52:51.596795 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:52:51 crc kubenswrapper[4783]: I1002 10:52:51.596830 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:52:51 crc kubenswrapper[4783]: I1002 10:52:51.596847 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:52:51 crc kubenswrapper[4783]: I1002 10:52:51.600523 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"09abd61a7b7c383afc9b731a669e303142e7c6cfd0ddac5d4eb48c5321695bbc"} Oct 02 10:52:51 crc kubenswrapper[4783]: I1002 10:52:51.600806 4783 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 02 10:52:51 crc kubenswrapper[4783]: I1002 10:52:51.600875 4783 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Oct 02 10:52:51 crc kubenswrapper[4783]: I1002 10:52:51.600884 4783 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 02 10:52:51 crc kubenswrapper[4783]: I1002 10:52:51.600918 4783 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 02 10:52:51 crc kubenswrapper[4783]: I1002 10:52:51.600929 4783 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 02 10:52:51 crc kubenswrapper[4783]: I1002 10:52:51.603343 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:52:51 crc kubenswrapper[4783]: I1002 10:52:51.603378 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:52:51 crc kubenswrapper[4783]: I1002 10:52:51.603391 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:52:51 crc kubenswrapper[4783]: I1002 10:52:51.603390 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Oct 02 10:52:51 crc kubenswrapper[4783]: I1002 10:52:51.603470 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:52:51 crc kubenswrapper[4783]: I1002 10:52:51.603474 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:52:51 crc kubenswrapper[4783]: I1002 10:52:51.603497 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:52:51 crc kubenswrapper[4783]: I1002 10:52:51.603516 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:52:51 crc kubenswrapper[4783]: I1002 10:52:51.603541 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:52:51 crc kubenswrapper[4783]: I1002 10:52:51.604396 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:52:51 crc kubenswrapper[4783]: I1002 10:52:51.604484 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:52:51 crc kubenswrapper[4783]: I1002 10:52:51.604495 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:52:51 crc kubenswrapper[4783]: W1002 10:52:51.719263 4783 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.129.56.217:6443: connect: connection refused Oct 02 10:52:51 crc kubenswrapper[4783]: E1002 10:52:51.719344 4783 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.129.56.217:6443: connect: connection refused" logger="UnhandledError" Oct 02 10:52:52 crc kubenswrapper[4783]: I1002 10:52:52.605311 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Oct 02 10:52:52 crc kubenswrapper[4783]: I1002 10:52:52.607347 4783 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="09abd61a7b7c383afc9b731a669e303142e7c6cfd0ddac5d4eb48c5321695bbc" exitCode=255 Oct 02 10:52:52 crc kubenswrapper[4783]: I1002 10:52:52.607445 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"09abd61a7b7c383afc9b731a669e303142e7c6cfd0ddac5d4eb48c5321695bbc"} Oct 02 10:52:52 crc kubenswrapper[4783]: I1002 10:52:52.607612 4783 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 02 10:52:52 crc kubenswrapper[4783]: I1002 10:52:52.608543 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:52:52 crc kubenswrapper[4783]: I1002 10:52:52.608571 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:52:52 crc kubenswrapper[4783]: I1002 10:52:52.608582 4783 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:52:52 crc kubenswrapper[4783]: I1002 10:52:52.609144 4783 scope.go:117] "RemoveContainer" containerID="09abd61a7b7c383afc9b731a669e303142e7c6cfd0ddac5d4eb48c5321695bbc" Oct 02 10:52:52 crc kubenswrapper[4783]: I1002 10:52:52.609436 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Oct 02 10:52:52 crc kubenswrapper[4783]: I1002 10:52:52.610800 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"d8d3ec78a66bfaf15222ed50fe81f042cbf0b9aa5ecc8fb175b0efc8bc1bccbb"} Oct 02 10:52:52 crc kubenswrapper[4783]: I1002 10:52:52.610836 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"8b2e414bb5345d3aae7e32d912acf6d7ede59c1f47e1474caf4736663c733d2e"} Oct 02 10:52:52 crc kubenswrapper[4783]: I1002 10:52:52.610855 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"ebdc8ed0cab8049a9f6220e6069291fd7a0e6c2b97e0442e3221549cbcc889f3"} Oct 02 10:52:52 crc kubenswrapper[4783]: I1002 10:52:52.610862 4783 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 02 10:52:52 crc kubenswrapper[4783]: I1002 10:52:52.611642 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:52:52 crc kubenswrapper[4783]: I1002 10:52:52.611667 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:52:52 crc kubenswrapper[4783]: I1002 10:52:52.611676 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:52:52 crc kubenswrapper[4783]: I1002 10:52:52.875488 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 02 10:52:53 crc kubenswrapper[4783]: I1002 10:52:53.618798 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"4edada94eb053469810a747a5d1bf8fe2c2da095e5c944dd2cd5ab7846faf5ef"} Oct 02 10:52:53 crc kubenswrapper[4783]: I1002 10:52:53.618900 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"6a03f92fd7d8f5e7193682a5f1db78e947bea4135976b5db6d87842634f0c4d2"} Oct 02 10:52:53 crc kubenswrapper[4783]: I1002 10:52:53.618974 4783 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 02 10:52:53 crc kubenswrapper[4783]: I1002 10:52:53.620301 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:52:53 crc kubenswrapper[4783]: I1002 10:52:53.620365 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:52:53 crc kubenswrapper[4783]: I1002 10:52:53.620387 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:52:53 crc kubenswrapper[4783]: I1002 
10:52:53.622031 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Oct 02 10:52:53 crc kubenswrapper[4783]: I1002 10:52:53.624228 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a"} Oct 02 10:52:53 crc kubenswrapper[4783]: I1002 10:52:53.624372 4783 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 02 10:52:53 crc kubenswrapper[4783]: I1002 10:52:53.629927 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:52:53 crc kubenswrapper[4783]: I1002 10:52:53.629988 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:52:53 crc kubenswrapper[4783]: I1002 10:52:53.630003 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:52:53 crc kubenswrapper[4783]: I1002 10:52:53.667164 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 02 10:52:53 crc kubenswrapper[4783]: I1002 10:52:53.962775 4783 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 02 10:52:53 crc kubenswrapper[4783]: I1002 10:52:53.964685 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:52:53 crc kubenswrapper[4783]: I1002 10:52:53.964748 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:52:53 crc kubenswrapper[4783]: I1002 10:52:53.964783 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:52:53 crc kubenswrapper[4783]: I1002 10:52:53.964826 4783 kubelet_node_status.go:76] "Attempting to register node" node="crc" Oct 02 10:52:54 crc kubenswrapper[4783]: I1002 10:52:54.627367 4783 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 02 10:52:54 crc kubenswrapper[4783]: I1002 10:52:54.627513 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 02 10:52:54 crc kubenswrapper[4783]: I1002 10:52:54.627376 4783 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 02 10:52:54 crc kubenswrapper[4783]: I1002 10:52:54.629132 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:52:54 crc kubenswrapper[4783]: I1002 10:52:54.629157 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:52:54 crc kubenswrapper[4783]: I1002 10:52:54.629181 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:52:54 crc kubenswrapper[4783]: I1002 10:52:54.629195 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:52:54 crc kubenswrapper[4783]: I1002 10:52:54.629199 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Oct 02 10:52:54 crc kubenswrapper[4783]: I1002 10:52:54.629221 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:52:54 crc kubenswrapper[4783]: I1002 10:52:54.733534 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 02 10:52:55 crc kubenswrapper[4783]: I1002 10:52:55.448567 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 02 10:52:55 crc kubenswrapper[4783]: I1002 10:52:55.448806 4783 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 02 10:52:55 crc kubenswrapper[4783]: I1002 10:52:55.450321 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:52:55 crc kubenswrapper[4783]: I1002 10:52:55.450381 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:52:55 crc kubenswrapper[4783]: I1002 10:52:55.450404 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:52:55 crc kubenswrapper[4783]: I1002 10:52:55.579854 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 02 10:52:55 crc kubenswrapper[4783]: I1002 10:52:55.630052 4783 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 02 10:52:55 crc kubenswrapper[4783]: I1002 10:52:55.630124 4783 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 02 10:52:55 crc kubenswrapper[4783]: I1002 10:52:55.631885 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:52:55 crc kubenswrapper[4783]: I1002 10:52:55.631950 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:52:55 crc kubenswrapper[4783]: I1002 10:52:55.631977 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:52:55 crc kubenswrapper[4783]: I1002 10:52:55.632034 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:52:55 crc kubenswrapper[4783]: I1002 10:52:55.632091 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:52:55 crc kubenswrapper[4783]: I1002 10:52:55.632115 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:52:56 crc kubenswrapper[4783]: I1002 10:52:56.441976 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 02 10:52:56 crc kubenswrapper[4783]: I1002 10:52:56.632724 4783 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 02 10:52:56 crc kubenswrapper[4783]: I1002 10:52:56.632785 4783 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 02 10:52:56 crc kubenswrapper[4783]: I1002 10:52:56.634026 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 
10:52:56 crc kubenswrapper[4783]: I1002 10:52:56.634063 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:52:56 crc kubenswrapper[4783]: I1002 10:52:56.634077 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:52:56 crc kubenswrapper[4783]: I1002 10:52:56.634392 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:52:56 crc kubenswrapper[4783]: I1002 10:52:56.634495 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:52:56 crc kubenswrapper[4783]: I1002 10:52:56.634518 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:52:57 crc kubenswrapper[4783]: I1002 10:52:57.137806 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 02 10:52:57 crc kubenswrapper[4783]: I1002 10:52:57.145922 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 02 10:52:57 crc kubenswrapper[4783]: I1002 10:52:57.636571 4783 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 02 10:52:57 crc kubenswrapper[4783]: I1002 10:52:57.638252 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:52:57 crc kubenswrapper[4783]: I1002 10:52:57.638306 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:52:57 crc kubenswrapper[4783]: I1002 10:52:57.638321 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:52:57 crc kubenswrapper[4783]: E1002 10:52:57.661290 4783 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Oct 02 10:52:58 crc kubenswrapper[4783]: I1002 10:52:58.275509 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Oct 02 10:52:58 crc kubenswrapper[4783]: I1002 10:52:58.275821 4783 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 02 10:52:58 crc kubenswrapper[4783]: I1002 10:52:58.280347 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:52:58 crc kubenswrapper[4783]: I1002 10:52:58.280454 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:52:58 crc kubenswrapper[4783]: I1002 10:52:58.280511 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:52:58 crc kubenswrapper[4783]: I1002 10:52:58.580250 4783 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 02 10:52:58 crc kubenswrapper[4783]: I1002 10:52:58.580378 4783 prober.go:107] "Probe failed" probeType="Startup" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 02 10:52:58 crc kubenswrapper[4783]: I1002 10:52:58.640402 4783 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 02 10:52:58 crc kubenswrapper[4783]: I1002 10:52:58.642017 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:52:58 crc kubenswrapper[4783]: I1002 10:52:58.642095 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:52:58 crc kubenswrapper[4783]: I1002 10:52:58.642121 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:52:58 crc kubenswrapper[4783]: I1002 10:52:58.647282 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 02 10:52:59 crc kubenswrapper[4783]: I1002 10:52:59.642218 4783 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 02 10:52:59 crc kubenswrapper[4783]: I1002 10:52:59.643097 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:52:59 crc kubenswrapper[4783]: I1002 10:52:59.643141 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:52:59 crc kubenswrapper[4783]: I1002 10:52:59.643154 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:52:59 crc kubenswrapper[4783]: I1002 10:52:59.927358 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Oct 02 10:52:59 crc kubenswrapper[4783]: I1002 10:52:59.927539 4783 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 02 10:52:59 crc kubenswrapper[4783]: I1002 10:52:59.928616 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:52:59 crc kubenswrapper[4783]: I1002 10:52:59.928694 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:52:59 crc kubenswrapper[4783]: I1002 10:52:59.928714 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:02 crc kubenswrapper[4783]: I1002 10:53:02.455573 4783 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout Oct 02 10:53:02 crc kubenswrapper[4783]: I1002 10:53:02.754632 4783 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Oct 02 10:53:02 crc kubenswrapper[4783]: I1002 10:53:02.754698 4783 prober.go:107] "Probe 
failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Oct 02 10:53:02 crc kubenswrapper[4783]: I1002 10:53:02.766915 4783 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Oct 02 10:53:02 crc kubenswrapper[4783]: I1002 10:53:02.766982 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Oct 02 10:53:02 crc kubenswrapper[4783]: I1002 10:53:02.876226 4783 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Oct 02 10:53:02 crc kubenswrapper[4783]: I1002 10:53:02.876297 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Oct 02 10:53:04 crc kubenswrapper[4783]: I1002 10:53:04.739441 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 02 10:53:04 crc kubenswrapper[4783]: I1002 10:53:04.739601 4783 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 02 10:53:04 crc kubenswrapper[4783]: I1002 10:53:04.740175 4783 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Oct 02 10:53:04 crc kubenswrapper[4783]: I1002 10:53:04.740235 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Oct 02 10:53:04 crc kubenswrapper[4783]: I1002 10:53:04.740590 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:04 crc kubenswrapper[4783]: I1002 10:53:04.740638 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:04 crc kubenswrapper[4783]: I1002 10:53:04.740651 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:04 crc kubenswrapper[4783]: I1002 10:53:04.743741 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 02 10:53:05 crc kubenswrapper[4783]: I1002 10:53:05.586354 4783 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Oct 02 10:53:05 crc kubenswrapper[4783]: I1002 10:53:05.586487 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Oct 02 10:53:05 crc kubenswrapper[4783]: I1002 10:53:05.660306 4783 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 02 10:53:05 crc kubenswrapper[4783]: I1002 10:53:05.661139 4783 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Oct 02 10:53:05 crc kubenswrapper[4783]: I1002 10:53:05.661223 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Oct 02 10:53:05 crc kubenswrapper[4783]: I1002 10:53:05.661702 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:05 crc kubenswrapper[4783]: I1002 10:53:05.661770 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:05 crc kubenswrapper[4783]: I1002 10:53:05.661794 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:07 crc kubenswrapper[4783]: E1002 10:53:07.662077 4783 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Oct 02 10:53:07 crc kubenswrapper[4783]: E1002 10:53:07.764484 4783 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="6.4s" Oct 02 10:53:07 crc kubenswrapper[4783]: I1002 10:53:07.767642 4783 trace.go:236] Trace[1206190186]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (02-Oct-2025 10:52:56.714) (total time: 11053ms): Oct 02 10:53:07 crc kubenswrapper[4783]: Trace[1206190186]: ---"Objects listed" error: 11053ms (10:53:07.767) Oct 02 10:53:07 crc kubenswrapper[4783]: Trace[1206190186]: [11.053250425s] [11.053250425s] END Oct 02 10:53:07 crc kubenswrapper[4783]: I1002 10:53:07.767699 4783 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Oct 02 10:53:07 crc kubenswrapper[4783]: I1002 10:53:07.768340 4783 trace.go:236] Trace[1628312545]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (02-Oct-2025 10:52:55.719) (total time: 
12049ms):
Oct 02 10:53:07 crc kubenswrapper[4783]: Trace[1628312545]: ---"Objects listed" error: 12049ms (10:53:07.768)
Oct 02 10:53:07 crc kubenswrapper[4783]: Trace[1628312545]: [12.049110686s] [12.049110686s] END
Oct 02 10:53:07 crc kubenswrapper[4783]: I1002 10:53:07.768377 4783 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160
Oct 02 10:53:07 crc kubenswrapper[4783]: I1002 10:53:07.769861 4783 trace.go:236] Trace[1840956514]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (02-Oct-2025 10:52:55.497) (total time: 12271ms):
Oct 02 10:53:07 crc kubenswrapper[4783]: Trace[1840956514]: ---"Objects listed" error: 12271ms (10:53:07.769)
Oct 02 10:53:07 crc kubenswrapper[4783]: Trace[1840956514]: [12.271819676s] [12.271819676s] END
Oct 02 10:53:07 crc kubenswrapper[4783]: I1002 10:53:07.769897 4783 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160
Oct 02 10:53:07 crc kubenswrapper[4783]: I1002 10:53:07.770041 4783 reconstruct.go:205] "DevicePaths of reconstructed volumes updated"
Oct 02 10:53:07 crc kubenswrapper[4783]: I1002 10:53:07.774111 4783 trace.go:236] Trace[1707712580]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (02-Oct-2025 10:52:56.118) (total time: 11655ms):
Oct 02 10:53:07 crc kubenswrapper[4783]: Trace[1707712580]: ---"Objects listed" error: 11655ms (10:53:07.773)
Oct 02 10:53:07 crc kubenswrapper[4783]: Trace[1707712580]: [11.655510226s] [11.655510226s] END
Oct 02 10:53:07 crc kubenswrapper[4783]: I1002 10:53:07.774155 4783 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160
Oct 02 10:53:07 crc kubenswrapper[4783]: E1002 10:53:07.774733 4783 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc"
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.452239 4783 apiserver.go:52] "Watching apiserver"
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.460654 4783 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.461105 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf"]
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.461795 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 02 10:53:08 crc kubenswrapper[4783]: E1002 10:53:08.461958 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.462024 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.462113 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb"
Oct 02 10:53:08 crc kubenswrapper[4783]: E1002 10:53:08.462170 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.462053 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 02 10:53:08 crc kubenswrapper[4783]: E1002 10:53:08.462312 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.462804 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h"
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.464352 4783 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world"
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.465131 4783 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.465859 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.466128 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.466184 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.465858 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.466666 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.471263 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.471297 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.471721 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.471806 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.475108 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.475174 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.475224 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.475272 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.475318 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: 
\"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.475363 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.475408 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.475492 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.475535 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.475577 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.475622 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.475666 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.475707 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.475748 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.475798 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.475840 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.475881 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.475927 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.475972 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.476014 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.476058 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.476104 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.476149 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.476241 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.476285 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.476353 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.476398 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.476477 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.477576 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.478760 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.478882 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.478925 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.479574 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.479651 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.479668 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.479700 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.479747 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.479794 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.479845 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.479882 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.479893 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.479978 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.480033 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.480083 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.480138 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.480151 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.480241 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.480292 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.480344 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.480390 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.480474 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.480912 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.481338 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.481378 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.481405 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.481516 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.481567 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.481615 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.481659 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.481704 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.481750 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.481795 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.481844 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.481892 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: 
\"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.481940 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.481984 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.482031 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.482076 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.482122 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.482165 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.482255 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.482885 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.483221 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.483612 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.483666 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.483731 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.484163 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.484327 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.484471 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.485058 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.484548 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.485590 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.485832 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.486049 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.486270 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.486572 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.487950 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.488178 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.488398 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.488654 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.488862 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.489061 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.489255 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.491483 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.491790 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.492254 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.492507 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.492701 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.492849 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.492987 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.493135 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.493270 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.493492 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.493727 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.493878 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.494022 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.494159 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.494286 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.494447 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.494602 4783 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.494746 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.495354 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.495592 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.495734 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.495878 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.496021 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.496156 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.496295 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.496475 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Oct 02 10:53:08 crc 
kubenswrapper[4783]: I1002 10:53:08.496612 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.496757 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") "
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.496972 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") "
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.497119 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") "
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.497265 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") "
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.497404 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") "
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.497678 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") "
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.497831 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") "
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.497974 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") "
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.498111 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") "
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.498242 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") "
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.498618 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") "
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.498756 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.498956 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") "
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.499104 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") "
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.499248 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") "
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.499392 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") "
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.499574 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") "
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.499703 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") "
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.499841 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") "
Oct 02 10:53:08 crc
kubenswrapper[4783]: I1002 10:53:08.499991 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.501629 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.501670 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.501695 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.501725 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.501773 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.501799 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.501823 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.501849 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.501873 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Oct 02 10:53:08 crc kubenswrapper[4783]: 
I1002 10:53:08.501991 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.502018 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.502043 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.502067 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.502093 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.502117 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.502380 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.504708 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.504847 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.505049 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: 
\"e7e6199b-1264-4501-8953-767f51328d08\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.505183 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.505228 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.505267 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.505304 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.505337 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.505370 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.505404 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.506319 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.506445 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.506637 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: 
\"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.506687 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.506737 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.506771 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.506806 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.506843 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.506876 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.506908 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.506943 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.506979 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.507016 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: 
\"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.507049 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.507083 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.507119 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.507152 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.507187 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.507224 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.507258 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.507295 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.507329 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.507366 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.507402 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.507468 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.507506 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.507540 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.507575 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.507611 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.507645 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.507678 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.507714 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.507749 4783 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.507784 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.507820 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.507856 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.507894 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.507932 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.507966 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.508002 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.508041 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.508078 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Oct 02 10:53:08 
crc kubenswrapper[4783]: I1002 10:53:08.508113 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.508152 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.509320 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.509402 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.509928 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.510050 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.510121 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.510195 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.510288 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Oct 02 10:53:08 crc 
kubenswrapper[4783]: I1002 10:53:08.510468 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.510515 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.510579 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.510690 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.510754 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.510791 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.510867 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.510986 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.511509 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: 
\"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.485323 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.485513 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.485781 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.485797 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.485702 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.515679 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.515796 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.486148 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.486161 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.486682 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.486837 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.487138 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.515884 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.487352 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.487612 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). 
InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.487637 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.487946 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.488116 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.488317 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.488557 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.488574 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.488912 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.515929 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.489788 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.490323 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.490589 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.491176 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.491198 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.491551 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.492000 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). 
InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.492590 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.493744 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.494239 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.494345 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.494751 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.495151 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.495348 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.495414 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.495661 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.495810 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.495982 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.502508 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.502540 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.502564 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.502584 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). 
InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.503035 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.503927 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.505472 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.507344 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.507689 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.507760 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.508107 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.508124 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.508163 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.508485 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.509332 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.509767 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.510159 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.510225 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.510550 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.510826 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.510904 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.511186 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.511973 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.512013 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.512168 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.512326 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). 
InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.512779 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.513525 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.513960 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.515301 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.515592 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.516152 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.516194 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.516734 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). 
InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.516787 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.517062 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.517216 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.517233 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.517337 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.488582 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.518396 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.518479 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.518986 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.519611 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.519619 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.517200 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.519979 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.520225 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.520671 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.520733 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.521023 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.521066 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.521287 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.521612 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.524184 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.524273 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.524319 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.524505 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.524819 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.526988 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.527478 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.527808 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.527817 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.528161 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.528380 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.528449 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). 
InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.528445 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.528893 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.528968 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.529032 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.529178 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.529327 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.524626 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.530301 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.530574 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.530783 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.531029 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.531103 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.531296 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.532681 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.532934 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.533064 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.533665 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.533188 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.533933 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.534008 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.534172 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.535377 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.535691 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.536003 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.537218 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.537622 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.538271 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.538448 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.538669 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.539234 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.539777 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.539997 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.540679 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.541230 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.541852 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.542043 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.542083 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: E1002 10:53:08.542511 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:53:09.04249077 +0000 UTC m=+22.358685031 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.543078 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.545018 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.545063 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.545560 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.546037 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.546374 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.547497 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: E1002 10:53:08.547627 4783 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Oct 02 10:53:08 crc kubenswrapper[4783]: E1002 10:53:08.547790 4783 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 02 10:53:08 crc kubenswrapper[4783]: E1002 10:53:08.547877 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-02 10:53:09.047855746 +0000 UTC m=+22.364050027 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.548669 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.549597 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.549944 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.550153 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.550378 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.551235 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.552145 4783 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. 
Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.553841 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.553882 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.550752 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.552461 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.553055 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.553558 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: E1002 10:53:08.553816 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-02 10:53:09.053780356 +0000 UTC m=+22.369974637 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.555372 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.554465 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.555264 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.555911 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.556872 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.557198 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.557754 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.558391 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.558510 4783 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.558524 4783 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.558536 4783 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.558673 4783 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.558714 4783 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.558732 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" 
(UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.558752 4783 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.558764 4783 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.558792 4783 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.558804 4783 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.558850 4783 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.558873 4783 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.558885 4783 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.560030 4783 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.560070 4783 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.560091 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.568879 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 02 10:53:08 crc kubenswrapper[4783]: E1002 10:53:08.571090 4783 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 02 10:53:08 crc kubenswrapper[4783]: E1002 10:53:08.571123 4783 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 02 10:53:08 crc kubenswrapper[4783]: E1002 10:53:08.571139 4783 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] 
Oct 02 10:53:08 crc kubenswrapper[4783]: E1002 10:53:08.571195 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-10-02 10:53:09.071179307 +0000 UTC m=+22.387373568 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.584679 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Oct 02 10:53:08 crc kubenswrapper[4783]: E1002 10:53:08.586006 4783 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Oct 02 10:53:08 crc kubenswrapper[4783]: E1002 10:53:08.586024 4783 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Oct 02 10:53:08 crc kubenswrapper[4783]: E1002 10:53:08.586036 4783 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Oct 02 10:53:08 crc kubenswrapper[4783]: E1002 10:53:08.586087 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-10-02 10:53:09.086068964 +0000 UTC m=+22.402263325 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.586318 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.589230 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h"
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.594147 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.595657 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.595944 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.596164 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.596300 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.596653 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.596667 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.598176 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.599486 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.601245 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.601629 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.601780 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.601936 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb"
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.602434 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.602588 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.603216 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb"
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.603364 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.605472 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.613665 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.615054 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.618869 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.624208 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"acf018d3-a71b-450d-88ef-8f991d5d2219\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf7913b5261e05e8bbfb22a81635ff274a8a28d550d004a5349a9ed84ba5c8b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcc4f647d50346cff07c267ebe645576d032b01e443eaabe4b9aa73b91966e4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdd2ca18244e601967c7e10b1f8f1f02fcf33bfee712d78f7715909c95581b9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0e111ce373594ac0f4bf89b06fd2008eb6554e566575b88e4a4f6beaaa29d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.624916 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.633064 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.645874 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused"
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.661434 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.661477 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h"
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.661546 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf"
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.661553 4783 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.661579 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.661588 4783 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.661597 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.661605 4783 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.661575 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h"
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.661613 4783 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.661657 4783 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.661672 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.661685 4783 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.661698 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.661709 4783 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.661720 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.661731 4783 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.661742 4783 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.661753 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.661764 4783 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.661774 4783 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.661784 4783 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.661794 4783 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.661804 4783 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.661814 4783 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.661824 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.661835 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.661845 4783 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.661856 4783 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.661867 4783 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.661878 4783 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.661890 4783 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.661902 4783 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.661914 4783 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.661925 4783 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.661935 4783 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.661945 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.661962 4783 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.661973 4783 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.661984 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.661995 4783 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662006 4783 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662016 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662027 4783 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662037 4783 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662048 4783 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662058 4783 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662069 4783 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662079 4783 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662090 4783 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662101 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662113 4783 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662123 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662134 4783 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662152 4783 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662175 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662187 4783 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662223 4783 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662235 4783 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662247 4783 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662257 4783 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662268 4783 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662279 4783 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662290 4783 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662300 4783 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662310 4783 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662322 4783 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662334 4783 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662345 4783 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662355 4783 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662365 4783 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662376 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662386 4783 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662396 4783 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662407 4783 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662437 4783 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662450 4783 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662460 4783 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662471 4783 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662482 4783 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662495 4783 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662505 4783 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662515 4783 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662525 4783 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662542 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662553 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662564 4783 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662574 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662590 4783 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662600 4783 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662610 4783 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662620 4783 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662630 4783 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662641 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662651 4783 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662662 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662674 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662684 4783 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\""
Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662694 4783 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName:
\"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662705 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662716 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662726 4783 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662737 4783 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662749 4783 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662764 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662775 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662786 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662798 4783 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662808 4783 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662820 4783 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662833 4783 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662843 4783 
reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662854 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662865 4783 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662875 4783 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662886 4783 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662897 4783 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662908 4783 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662919 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662929 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662940 4783 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662967 4783 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662977 4783 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662988 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.662999 4783 
reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663009 4783 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663020 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663031 4783 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663042 4783 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663054 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663065 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663075 4783 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663086 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663097 4783 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663108 4783 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663117 4783 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663130 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 
10:53:08.663141 4783 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663152 4783 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663162 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663174 4783 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663184 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663194 4783 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663205 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663215 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663226 4783 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663236 4783 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663252 4783 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663262 4783 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663273 4783 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 
10:53:08.663283 4783 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663293 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663304 4783 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663314 4783 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663327 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663338 4783 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663348 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663359 4783 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663370 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663381 4783 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663392 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663404 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663431 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Oct 02 
10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663443 4783 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663461 4783 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663472 4783 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663482 4783 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663493 4783 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663505 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663517 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663527 4783 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663538 4783 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663549 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663560 4783 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663571 4783 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663583 4783 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc 
kubenswrapper[4783]: I1002 10:53:08.663595 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663606 4783 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663616 4783 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663627 4783 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.663638 4783 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.669552 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.670522 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.672274 4783 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a" exitCode=255 Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.672466 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a"} Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.672549 4783 scope.go:117] "RemoveContainer" containerID="09abd61a7b7c383afc9b731a669e303142e7c6cfd0ddac5d4eb48c5321695bbc" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.683120 4783 scope.go:117] "RemoveContainer" containerID="ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.683265 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Oct 02 10:53:08 crc kubenswrapper[4783]: E1002 10:53:08.683316 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.684834 4783 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"acf018d3-a71b-450d-88ef-8f991d5d2219\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf7913b5261e05e8bbfb22a81635ff274a8a28d550d004a5349a9ed84ba5c8b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcc4f647d50346cff07c267ebe645576d032b01e443eaabe4b9aa73b91966e4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdd2ca18244e601967c7e10b1f8f1f02fcf33bfee712d78f7715909c95581b9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0e111ce373594ac0f4bf89b06fd2008eb6554e566575b88e
4a4f6beaaa29d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.696366 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.705821 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.714128 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.726148 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.735683 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.744080 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.795493 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Oct 02 10:53:08 crc kubenswrapper[4783]: W1002 10:53:08.805551 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef543e1b_8068_4ea3_b32a_61027b32e95d.slice/crio-fd65d9fb7332fc877618a7ed1b981daa00d5eedb3f433d8418b2006628bb08ae WatchSource:0}: Error finding container fd65d9fb7332fc877618a7ed1b981daa00d5eedb3f433d8418b2006628bb08ae: Status 404 returned error can't find the container with id fd65d9fb7332fc877618a7ed1b981daa00d5eedb3f433d8418b2006628bb08ae Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.837501 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Oct 02 10:53:08 crc kubenswrapper[4783]: I1002 10:53:08.845877 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Oct 02 10:53:08 crc kubenswrapper[4783]: W1002 10:53:08.869576 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37a5e44f_9a88_4405_be8a_b645485e7312.slice/crio-dd69052c6920a2a9102103c78853484b922594ddf42d050027771c195c4e4593 WatchSource:0}: Error finding container dd69052c6920a2a9102103c78853484b922594ddf42d050027771c195c4e4593: Status 404 returned error can't find the container with id dd69052c6920a2a9102103c78853484b922594ddf42d050027771c195c4e4593 Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.067070 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.067153 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.067187 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:53:09 crc kubenswrapper[4783]: E1002 10:53:09.067249 4783 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Oct 02 10:53:09 crc kubenswrapper[4783]: E1002 10:53:09.067286 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-02 10:53:10.067274141 +0000 UTC m=+23.383468402 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Oct 02 10:53:09 crc kubenswrapper[4783]: E1002 10:53:09.067367 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:53:10.067360563 +0000 UTC m=+23.383554824 (durationBeforeRetry 1s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:53:09 crc kubenswrapper[4783]: E1002 10:53:09.067443 4783 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 02 10:53:09 crc kubenswrapper[4783]: E1002 10:53:09.067466 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-02 10:53:10.067459056 +0000 UTC m=+23.383653317 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.167815 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.167880 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 02 10:53:09 crc kubenswrapper[4783]: E1002 10:53:09.168025 4783 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 02 10:53:09 crc kubenswrapper[4783]: E1002 10:53:09.168040 4783 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 02 10:53:09 crc kubenswrapper[4783]: E1002 10:53:09.168051 4783 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 02 10:53:09 crc kubenswrapper[4783]: E1002 10:53:09.168105 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-10-02 10:53:10.168092115 +0000 UTC m=+23.484286366 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 02 10:53:09 crc kubenswrapper[4783]: E1002 10:53:09.168196 4783 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 02 10:53:09 crc kubenswrapper[4783]: E1002 10:53:09.168206 4783 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 02 10:53:09 crc kubenswrapper[4783]: E1002 10:53:09.168233 4783 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 02 10:53:09 crc kubenswrapper[4783]: E1002 10:53:09.168255 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-10-02 10:53:10.168249249 +0000 UTC m=+23.484443510 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.548005 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.548676 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.549646 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.550368 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.552194 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.552899 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" 
Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.554200 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.555001 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.556353 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.557112 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.558334 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.559257 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.560481 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.561189 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.561982 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.563977 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.564738 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.566634 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.567454 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.568231 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 
10:53:09.569750 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.571808 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.572620 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.573680 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.574383 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.575151 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.575934 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.576541 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.578947 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.579885 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.580968 4783 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.581105 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.585642 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.586288 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 
10:53:09.586845 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.589381 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.590220 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.590874 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.593213 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.595504 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.596345 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.597512 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.599108 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.603873 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.604447 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.605576 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.606189 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.607697 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.608319 4783 kubelet_volumes.go:163] "Cleaned 
up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.609510 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.610146 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.610915 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.612292 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.612901 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.676093 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"a4a3fab4917d2aaaba6208c2c5c4dd19b845e8d52e5e350566d8a78cfd581604"} Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.676855 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"dd69052c6920a2a9102103c78853484b922594ddf42d050027771c195c4e4593"} Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.678182 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"71c1635b8da62ad02113d787572b1d11afc4cee74aa352982f68ede4adc7460b"} Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.679946 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"9fd4a14eb803afc1b1efecd7c9b8560546554be16de62e8db79752bce61a47dc"} Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.679991 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"8e50292888ad1f8cb9b64dcecfa47015e88b7a560123e50d4a3bf1550b87cfaa"} Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.680002 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"fd65d9fb7332fc877618a7ed1b981daa00d5eedb3f433d8418b2006628bb08ae"} Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.681766 4783 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.690229 4783 scope.go:117] "RemoveContainer" containerID="ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a" Oct 02 10:53:09 crc kubenswrapper[4783]: E1002 10:53:09.690380 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.711257 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.752749 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:09Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.789527 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:09Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.820261 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:09Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.848116 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:09Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.859682 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a3fab4917d2aaaba6208c2c5c4dd19b845e8d52e5e350566d8a78cfd581604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:09Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.879851 4783 status_manager.go:875] 
"Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2054147b-696c-471a-9602-db747325cae2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d954f0dc3572c180556ccde9a4c429359575e5418fb786fc5df6987f8c326075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e521d7072220c202109a67fe59ebebe0db8bd877d02d9805324922eb3bf45d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb33b77ecaf413bc6e4aeb0f4358b12cd0f3fa6e01602673a518f5651e4bac6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMount
s\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://09abd61a7b7c383afc9b731a669e303142e7c6cfd0ddac5d4eb48c5321695bbc\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-02T10:52:52Z\\\",\\\"message\\\":\\\"W1002 10:52:50.723579 1 cmd.go:257] Using insecure, self-signed certificates\\\\nI1002 10:52:50.723840 1 crypto.go:601] Generating new CA for check-endpoints-signer@1759402370 cert, and key in /tmp/serving-cert-1013176980/serving-signer.crt, /tmp/serving-cert-1013176980/serving-signer.key\\\\nI1002 10:52:51.688153 1 observer_polling.go:159] Starting file observer\\\\nW1002 10:52:51.698194 1 builder.go:272] unable to get owner reference (falling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1002 10:52:51.698605 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1002 10:52:51.700012 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-1013176980/tls.crt::/tmp/serving-cert-1013176980/tls.key\\\\\\\"\\\\nF1002 10:52:52.003238 1 cmd.go:182] error initializing delegating authentication: unable to load configmap based request-header-client-ca-file: Get \\\\\\\"https://localhost:6443/api/v1/namespaces/kube-system/configmaps/extension-apiserver-authentication\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-02T10:53:07Z\\\",\\\"message\\\":\\\"file observer\\\\nW1002 10:53:07.776642 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1002 10:53:07.776950 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1002 10:53:07.778492 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-420939118/tls.crt::/tmp/serving-cert-420939118/tls.key\\\\\\\"\\\\nI1002 10:53:07.918100 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1002 10:53:07.922528 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1002 10:53:07.922551 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1002 10:53:07.922578 1 maxinflight.go:116] \\\\\\\"Set denominator for 
readonly requests\\\\\\\" limit=400\\\\nI1002 10:53:07.922585 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1002 10:53:07.930960 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1002 10:53:07.930984 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1002 10:53:07.930999 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931026 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931031 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1002 10:53:07.931036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1002 10:53:07.931039 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1002 10:53:07.931042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1002 10:53:07.933737 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://85188b6d7b5901c957d4ea576d5baab74589a2300f15d6210915fd78f6171348\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:09Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.893072 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"acf018d3-a71b-450d-88ef-8f991d5d2219\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf7913b5261e05e8bbfb22a81635ff274a8a28d550d004a5349a9ed84ba5c8b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcc4f647d50346cff07c267ebe645576d032b01e443eaabe4b9aa73b91966e4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdd2ca18244e601967c7e10b1f8f1f02fcf33bfee712d78f7715909c95581b9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubern
etes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0e111ce373594ac0f4bf89b06fd2008eb6554e566575b88e4a4f6beaaa29d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:09Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.909617 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fd4a14eb803afc1b1efecd7c9b8560546554be16de62e8db79752bce61a47dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50292888ad1f8cb9b64dcecfa47015e88b7a560123e50d4a3bf1550b87cfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:09Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.921195 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a3fab4917d2aaaba6208c2c5c4dd19b845e8d52e5e350566d8a78cfd581604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:09Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.931947 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2054147b-696c-471a-9602-db747325cae2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d954f0dc3572c180556ccde9a4c429359575e5418fb786fc5df6987f8c326075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e521d7072220c202109a67fe59ebebe0db8bd877d02d9805324922eb3bf45d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb33b77ecaf413bc6e4aeb0f4358b12cd0f3fa6e01602673a518f5651e4bac6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-02T10:53:07Z\\\",\\\"message\\\":\\\"file observer\\\\nW1002 10:53:07.776642 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1002 10:53:07.776950 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1002 10:53:07.778492 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-420939118/tls.crt::/tmp/serving-cert-420939118/tls.key\\\\\\\"\\\\nI1002 10:53:07.918100 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1002 10:53:07.922528 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1002 10:53:07.922551 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1002 10:53:07.922578 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1002 10:53:07.922585 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1002 10:53:07.930960 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1002 10:53:07.930984 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1002 10:53:07.930999 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931026 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931031 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1002 10:53:07.931036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1002 10:53:07.931039 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1002 10:53:07.931042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1002 10:53:07.933737 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://85188b6d7b5901c957d4ea576d5baab74589a2300f15d6210915fd78f6171348\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:09Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.949813 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"acf018d3-a71b-450d-88ef-8f991d5d2219\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf7913b5261e05e8bbfb22a81635ff274a8a28d550d004a5349a9ed84ba5c8b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcc4f647d50346cff07c267ebe645576d032b01e443eaabe4b9aa73b91966e4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdd2ca18244e601967c7e10b1f8f1f02fcf33bfee712d78f7715909c95581b9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0e111ce373594ac0f4bf89b06fd2008eb6554e566575b88e4a4f6beaaa29d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:09Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.954712 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.965913 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:09Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.968061 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.980570 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:09Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:09 crc kubenswrapper[4783]: I1002 10:53:09.995063 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:09Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.017831 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:10Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.039949 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:10Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.052585 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:10Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.063758 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:10Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.076443 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.076516 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.076580 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:53:10 crc kubenswrapper[4783]: E1002 10:53:10.076627 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:53:12.076599703 +0000 UTC m=+25.392793964 (durationBeforeRetry 2s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:53:10 crc kubenswrapper[4783]: E1002 10:53:10.076666 4783 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Oct 02 10:53:10 crc kubenswrapper[4783]: E1002 10:53:10.076730 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-02 10:53:12.076714186 +0000 UTC m=+25.392908457 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Oct 02 10:53:10 crc kubenswrapper[4783]: E1002 10:53:10.076800 4783 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 02 10:53:10 crc kubenswrapper[4783]: E1002 10:53:10.076903 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-02 10:53:12.07688389 +0000 UTC m=+25.393078161 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.078202 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:10Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.090679 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fd4a14eb803afc1b1efecd7c9b8560546554be16de62e8db79752bce61a47dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50292888ad1f8cb9b64dcecfa47015e88b7a560123e50d4a3bf1550b87cfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:10Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.095340 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.109239 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a3fab4917d2aaaba6208c2c5c4dd19b845e8d52e5e350566d8a78cfd581604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:10Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.124065 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2054147b-696c-471a-9602-db747325cae2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d954f0dc3572c180556ccde9a4c429359575e5418fb786fc5df6987f8c326075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e521d7072220c202109a67fe59ebebe0db8bd877d02d9805324922eb3bf45d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb33b77ecaf413bc6e4aeb0f4358b12cd0f3fa6e01602673a518f5651e4bac6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-02T10:53:07Z\\\",\\\"message\\\":\\\"file observer\\\\nW1002 10:53:07.776642 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1002 10:53:07.776950 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1002 10:53:07.778492 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-420939118/tls.crt::/tmp/serving-cert-420939118/tls.key\\\\\\\"\\\\nI1002 10:53:07.918100 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1002 10:53:07.922528 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1002 10:53:07.922551 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1002 10:53:07.922578 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1002 10:53:07.922585 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1002 10:53:07.930960 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1002 10:53:07.930984 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1002 10:53:07.930999 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931026 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931031 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1002 10:53:07.931036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1002 10:53:07.931039 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1002 10:53:07.931042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1002 10:53:07.933737 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://85188b6d7b5901c957d4ea576d5baab74589a2300f15d6210915fd78f6171348\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:10Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.142278 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"acf018d3-a71b-450d-88ef-8f991d5d2219\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf7913b5261e05e8bbfb22a81635ff274a8a28d550d004a5349a9ed84ba5c8b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcc4f647d50346cff07c267ebe645576d032b01e443eaabe4b9aa73b91966e4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdd2ca18244e601967c7e10b1f8f1f02fcf33bfee712d78f7715909c95581b9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0e111ce373594ac0f4bf89b06fd2008eb6554e566575b88e4a4f6beaaa29d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:10Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.149049 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-2j8rt"] Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.149542 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.150719 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-wmn4g"] Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.151374 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-wmn4g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.154131 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.154353 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.154469 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.154543 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.155200 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.155782 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.157817 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.158764 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.159157 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-jqvp2"] Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.159549 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.159655 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.159986 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-ks7tf"] Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.160227 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-ks7tf" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.160585 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-jqvp2" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.164425 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.166245 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.166264 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.166370 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.166530 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.174105 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:10Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.177242 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/94681624-a0a9-443a-9b4d-715182399740-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-jqvp2\" (UID: \"94681624-a0a9-443a-9b4d-715182399740\") " pod="openshift-multus/multus-additional-cni-plugins-jqvp2" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.177272 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/f6c8d5bc-163f-401f-bdc5-4625112dced9-cni-binary-copy\") pod \"multus-wmn4g\" (UID: \"f6c8d5bc-163f-401f-bdc5-4625112dced9\") " pod="openshift-multus/multus-wmn4g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.177289 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f6c8d5bc-163f-401f-bdc5-4625112dced9-etc-kubernetes\") pod \"multus-wmn4g\" (UID: \"f6c8d5bc-163f-401f-bdc5-4625112dced9\") " pod="openshift-multus/multus-wmn4g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.177313 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/f6c8d5bc-163f-401f-bdc5-4625112dced9-host-run-netns\") pod \"multus-wmn4g\" (UID: \"f6c8d5bc-163f-401f-bdc5-4625112dced9\") " pod="openshift-multus/multus-wmn4g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.177368 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/f6c8d5bc-163f-401f-bdc5-4625112dced9-host-var-lib-cni-multus\") pod \"multus-wmn4g\" (UID: \"f6c8d5bc-163f-401f-bdc5-4625112dced9\") " pod="openshift-multus/multus-wmn4g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.177383 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/f6c8d5bc-163f-401f-bdc5-4625112dced9-host-run-multus-certs\") pod \"multus-wmn4g\" (UID: \"f6c8d5bc-163f-401f-bdc5-4625112dced9\") " pod="openshift-multus/multus-wmn4g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.177463 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: 
\"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.177516 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/f6c8d5bc-163f-401f-bdc5-4625112dced9-os-release\") pod \"multus-wmn4g\" (UID: \"f6c8d5bc-163f-401f-bdc5-4625112dced9\") " pod="openshift-multus/multus-wmn4g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.177536 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/94681624-a0a9-443a-9b4d-715182399740-os-release\") pod \"multus-additional-cni-plugins-jqvp2\" (UID: \"94681624-a0a9-443a-9b4d-715182399740\") " pod="openshift-multus/multus-additional-cni-plugins-jqvp2" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.177562 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/f6c8d5bc-163f-401f-bdc5-4625112dced9-multus-socket-dir-parent\") pod \"multus-wmn4g\" (UID: \"f6c8d5bc-163f-401f-bdc5-4625112dced9\") " pod="openshift-multus/multus-wmn4g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.177616 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/f6c8d5bc-163f-401f-bdc5-4625112dced9-multus-daemon-config\") pod \"multus-wmn4g\" (UID: \"f6c8d5bc-163f-401f-bdc5-4625112dced9\") " pod="openshift-multus/multus-wmn4g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.177642 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 02 10:53:10 crc kubenswrapper[4783]: E1002 10:53:10.177660 4783 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 02 10:53:10 crc kubenswrapper[4783]: E1002 10:53:10.177684 4783 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 02 10:53:10 crc kubenswrapper[4783]: E1002 10:53:10.177698 4783 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.177700 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/94681624-a0a9-443a-9b4d-715182399740-cnibin\") pod \"multus-additional-cni-plugins-jqvp2\" (UID: \"94681624-a0a9-443a-9b4d-715182399740\") " pod="openshift-multus/multus-additional-cni-plugins-jqvp2" Oct 02 10:53:10 crc 
kubenswrapper[4783]: E1002 10:53:10.177782 4783 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 02 10:53:10 crc kubenswrapper[4783]: E1002 10:53:10.177815 4783 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 02 10:53:10 crc kubenswrapper[4783]: E1002 10:53:10.177826 4783 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 02 10:53:10 crc kubenswrapper[4783]: E1002 10:53:10.177836 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-10-02 10:53:12.177818226 +0000 UTC m=+25.494012497 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 02 10:53:10 crc kubenswrapper[4783]: E1002 10:53:10.177872 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-10-02 10:53:12.177862157 +0000 UTC m=+25.494056418 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.178358 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l6gs6\" (UniqueName: \"kubernetes.io/projected/f6c8d5bc-163f-401f-bdc5-4625112dced9-kube-api-access-l6gs6\") pod \"multus-wmn4g\" (UID: \"f6c8d5bc-163f-401f-bdc5-4625112dced9\") " pod="openshift-multus/multus-wmn4g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.178433 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/94681624-a0a9-443a-9b4d-715182399740-cni-binary-copy\") pod \"multus-additional-cni-plugins-jqvp2\" (UID: \"94681624-a0a9-443a-9b4d-715182399740\") " pod="openshift-multus/multus-additional-cni-plugins-jqvp2" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.178450 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/f6c8d5bc-163f-401f-bdc5-4625112dced9-host-run-k8s-cni-cncf-io\") pod \"multus-wmn4g\" (UID: \"f6c8d5bc-163f-401f-bdc5-4625112dced9\") " pod="openshift-multus/multus-wmn4g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.178466 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/0cbc7f6d-232e-484d-9afc-7111e428762c-hosts-file\") pod \"node-resolver-ks7tf\" (UID: \"0cbc7f6d-232e-484d-9afc-7111e428762c\") " pod="openshift-dns/node-resolver-ks7tf" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.178483 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/3288cc82-59a8-408e-8b0e-b5255882b4fb-rootfs\") pod \"machine-config-daemon-2j8rt\" (UID: \"3288cc82-59a8-408e-8b0e-b5255882b4fb\") " pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.178497 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/3288cc82-59a8-408e-8b0e-b5255882b4fb-proxy-tls\") pod \"machine-config-daemon-2j8rt\" (UID: \"3288cc82-59a8-408e-8b0e-b5255882b4fb\") " pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.178510 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/f6c8d5bc-163f-401f-bdc5-4625112dced9-multus-conf-dir\") pod \"multus-wmn4g\" (UID: \"f6c8d5bc-163f-401f-bdc5-4625112dced9\") " pod="openshift-multus/multus-wmn4g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.178526 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rplhm\" (UniqueName: \"kubernetes.io/projected/3288cc82-59a8-408e-8b0e-b5255882b4fb-kube-api-access-rplhm\") pod 
\"machine-config-daemon-2j8rt\" (UID: \"3288cc82-59a8-408e-8b0e-b5255882b4fb\") " pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.178578 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/f6c8d5bc-163f-401f-bdc5-4625112dced9-system-cni-dir\") pod \"multus-wmn4g\" (UID: \"f6c8d5bc-163f-401f-bdc5-4625112dced9\") " pod="openshift-multus/multus-wmn4g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.178608 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/f6c8d5bc-163f-401f-bdc5-4625112dced9-cnibin\") pod \"multus-wmn4g\" (UID: \"f6c8d5bc-163f-401f-bdc5-4625112dced9\") " pod="openshift-multus/multus-wmn4g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.178627 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/f6c8d5bc-163f-401f-bdc5-4625112dced9-hostroot\") pod \"multus-wmn4g\" (UID: \"f6c8d5bc-163f-401f-bdc5-4625112dced9\") " pod="openshift-multus/multus-wmn4g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.178649 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/94681624-a0a9-443a-9b4d-715182399740-system-cni-dir\") pod \"multus-additional-cni-plugins-jqvp2\" (UID: \"94681624-a0a9-443a-9b4d-715182399740\") " pod="openshift-multus/multus-additional-cni-plugins-jqvp2" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.178669 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/3288cc82-59a8-408e-8b0e-b5255882b4fb-mcd-auth-proxy-config\") pod \"machine-config-daemon-2j8rt\" (UID: \"3288cc82-59a8-408e-8b0e-b5255882b4fb\") " pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.178689 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/94681624-a0a9-443a-9b4d-715182399740-tuning-conf-dir\") pod \"multus-additional-cni-plugins-jqvp2\" (UID: \"94681624-a0a9-443a-9b4d-715182399740\") " pod="openshift-multus/multus-additional-cni-plugins-jqvp2" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.178709 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fj8jd\" (UniqueName: \"kubernetes.io/projected/0cbc7f6d-232e-484d-9afc-7111e428762c-kube-api-access-fj8jd\") pod \"node-resolver-ks7tf\" (UID: \"0cbc7f6d-232e-484d-9afc-7111e428762c\") " pod="openshift-dns/node-resolver-ks7tf" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.178732 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h8zq9\" (UniqueName: \"kubernetes.io/projected/94681624-a0a9-443a-9b4d-715182399740-kube-api-access-h8zq9\") pod \"multus-additional-cni-plugins-jqvp2\" (UID: \"94681624-a0a9-443a-9b4d-715182399740\") " pod="openshift-multus/multus-additional-cni-plugins-jqvp2" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.178753 4783 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/f6c8d5bc-163f-401f-bdc5-4625112dced9-multus-cni-dir\") pod \"multus-wmn4g\" (UID: \"f6c8d5bc-163f-401f-bdc5-4625112dced9\") " pod="openshift-multus/multus-wmn4g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.178777 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/f6c8d5bc-163f-401f-bdc5-4625112dced9-host-var-lib-cni-bin\") pod \"multus-wmn4g\" (UID: \"f6c8d5bc-163f-401f-bdc5-4625112dced9\") " pod="openshift-multus/multus-wmn4g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.178800 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/f6c8d5bc-163f-401f-bdc5-4625112dced9-host-var-lib-kubelet\") pod \"multus-wmn4g\" (UID: \"f6c8d5bc-163f-401f-bdc5-4625112dced9\") " pod="openshift-multus/multus-wmn4g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.188249 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:10Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.210073 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:10Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.231190 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:10Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.266299 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fd4a14eb803afc1b1efecd7c9b8560546554be16de62e8db79752bce61a47dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50292888ad1f8cb9b64dcecfa47015e88b7a560123e50d4a3bf1550b87cfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mount
Path\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:10Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.279365 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rplhm\" (UniqueName: \"kubernetes.io/projected/3288cc82-59a8-408e-8b0e-b5255882b4fb-kube-api-access-rplhm\") pod \"machine-config-daemon-2j8rt\" (UID: \"3288cc82-59a8-408e-8b0e-b5255882b4fb\") " pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.279405 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/f6c8d5bc-163f-401f-bdc5-4625112dced9-system-cni-dir\") pod \"multus-wmn4g\" (UID: \"f6c8d5bc-163f-401f-bdc5-4625112dced9\") " pod="openshift-multus/multus-wmn4g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.279460 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/f6c8d5bc-163f-401f-bdc5-4625112dced9-cnibin\") pod \"multus-wmn4g\" (UID: \"f6c8d5bc-163f-401f-bdc5-4625112dced9\") " pod="openshift-multus/multus-wmn4g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.279476 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/f6c8d5bc-163f-401f-bdc5-4625112dced9-hostroot\") pod \"multus-wmn4g\" (UID: \"f6c8d5bc-163f-401f-bdc5-4625112dced9\") " pod="openshift-multus/multus-wmn4g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.279498 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/94681624-a0a9-443a-9b4d-715182399740-system-cni-dir\") pod \"multus-additional-cni-plugins-jqvp2\" (UID: \"94681624-a0a9-443a-9b4d-715182399740\") " pod="openshift-multus/multus-additional-cni-plugins-jqvp2" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.279513 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/3288cc82-59a8-408e-8b0e-b5255882b4fb-mcd-auth-proxy-config\") pod \"machine-config-daemon-2j8rt\" (UID: \"3288cc82-59a8-408e-8b0e-b5255882b4fb\") " pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.279529 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/94681624-a0a9-443a-9b4d-715182399740-tuning-conf-dir\") pod \"multus-additional-cni-plugins-jqvp2\" (UID: \"94681624-a0a9-443a-9b4d-715182399740\") " pod="openshift-multus/multus-additional-cni-plugins-jqvp2" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.279544 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-fj8jd\" (UniqueName: \"kubernetes.io/projected/0cbc7f6d-232e-484d-9afc-7111e428762c-kube-api-access-fj8jd\") pod \"node-resolver-ks7tf\" (UID: \"0cbc7f6d-232e-484d-9afc-7111e428762c\") " pod="openshift-dns/node-resolver-ks7tf" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.279559 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h8zq9\" (UniqueName: \"kubernetes.io/projected/94681624-a0a9-443a-9b4d-715182399740-kube-api-access-h8zq9\") pod \"multus-additional-cni-plugins-jqvp2\" (UID: \"94681624-a0a9-443a-9b4d-715182399740\") " pod="openshift-multus/multus-additional-cni-plugins-jqvp2" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.279572 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/f6c8d5bc-163f-401f-bdc5-4625112dced9-multus-cni-dir\") pod \"multus-wmn4g\" (UID: \"f6c8d5bc-163f-401f-bdc5-4625112dced9\") " pod="openshift-multus/multus-wmn4g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.279588 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/f6c8d5bc-163f-401f-bdc5-4625112dced9-host-var-lib-cni-bin\") pod \"multus-wmn4g\" (UID: \"f6c8d5bc-163f-401f-bdc5-4625112dced9\") " pod="openshift-multus/multus-wmn4g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.279602 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/f6c8d5bc-163f-401f-bdc5-4625112dced9-host-var-lib-kubelet\") pod \"multus-wmn4g\" (UID: \"f6c8d5bc-163f-401f-bdc5-4625112dced9\") " pod="openshift-multus/multus-wmn4g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.279618 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/94681624-a0a9-443a-9b4d-715182399740-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-jqvp2\" (UID: \"94681624-a0a9-443a-9b4d-715182399740\") " pod="openshift-multus/multus-additional-cni-plugins-jqvp2" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.279632 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/f6c8d5bc-163f-401f-bdc5-4625112dced9-cni-binary-copy\") pod \"multus-wmn4g\" (UID: \"f6c8d5bc-163f-401f-bdc5-4625112dced9\") " pod="openshift-multus/multus-wmn4g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.279658 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f6c8d5bc-163f-401f-bdc5-4625112dced9-etc-kubernetes\") pod \"multus-wmn4g\" (UID: \"f6c8d5bc-163f-401f-bdc5-4625112dced9\") " pod="openshift-multus/multus-wmn4g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.279679 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/f6c8d5bc-163f-401f-bdc5-4625112dced9-host-run-netns\") pod \"multus-wmn4g\" (UID: \"f6c8d5bc-163f-401f-bdc5-4625112dced9\") " pod="openshift-multus/multus-wmn4g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.279693 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: 
\"kubernetes.io/host-path/f6c8d5bc-163f-401f-bdc5-4625112dced9-host-var-lib-cni-multus\") pod \"multus-wmn4g\" (UID: \"f6c8d5bc-163f-401f-bdc5-4625112dced9\") " pod="openshift-multus/multus-wmn4g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.279707 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/f6c8d5bc-163f-401f-bdc5-4625112dced9-host-run-multus-certs\") pod \"multus-wmn4g\" (UID: \"f6c8d5bc-163f-401f-bdc5-4625112dced9\") " pod="openshift-multus/multus-wmn4g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.279729 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/f6c8d5bc-163f-401f-bdc5-4625112dced9-os-release\") pod \"multus-wmn4g\" (UID: \"f6c8d5bc-163f-401f-bdc5-4625112dced9\") " pod="openshift-multus/multus-wmn4g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.279744 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/94681624-a0a9-443a-9b4d-715182399740-os-release\") pod \"multus-additional-cni-plugins-jqvp2\" (UID: \"94681624-a0a9-443a-9b4d-715182399740\") " pod="openshift-multus/multus-additional-cni-plugins-jqvp2" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.279765 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/f6c8d5bc-163f-401f-bdc5-4625112dced9-multus-socket-dir-parent\") pod \"multus-wmn4g\" (UID: \"f6c8d5bc-163f-401f-bdc5-4625112dced9\") " pod="openshift-multus/multus-wmn4g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.279779 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/f6c8d5bc-163f-401f-bdc5-4625112dced9-multus-daemon-config\") pod \"multus-wmn4g\" (UID: \"f6c8d5bc-163f-401f-bdc5-4625112dced9\") " pod="openshift-multus/multus-wmn4g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.279799 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/94681624-a0a9-443a-9b4d-715182399740-cnibin\") pod \"multus-additional-cni-plugins-jqvp2\" (UID: \"94681624-a0a9-443a-9b4d-715182399740\") " pod="openshift-multus/multus-additional-cni-plugins-jqvp2" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.279821 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l6gs6\" (UniqueName: \"kubernetes.io/projected/f6c8d5bc-163f-401f-bdc5-4625112dced9-kube-api-access-l6gs6\") pod \"multus-wmn4g\" (UID: \"f6c8d5bc-163f-401f-bdc5-4625112dced9\") " pod="openshift-multus/multus-wmn4g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.279836 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/94681624-a0a9-443a-9b4d-715182399740-cni-binary-copy\") pod \"multus-additional-cni-plugins-jqvp2\" (UID: \"94681624-a0a9-443a-9b4d-715182399740\") " pod="openshift-multus/multus-additional-cni-plugins-jqvp2" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.279850 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/f6c8d5bc-163f-401f-bdc5-4625112dced9-host-run-k8s-cni-cncf-io\") 
pod \"multus-wmn4g\" (UID: \"f6c8d5bc-163f-401f-bdc5-4625112dced9\") " pod="openshift-multus/multus-wmn4g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.279870 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/0cbc7f6d-232e-484d-9afc-7111e428762c-hosts-file\") pod \"node-resolver-ks7tf\" (UID: \"0cbc7f6d-232e-484d-9afc-7111e428762c\") " pod="openshift-dns/node-resolver-ks7tf" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.279884 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/3288cc82-59a8-408e-8b0e-b5255882b4fb-rootfs\") pod \"machine-config-daemon-2j8rt\" (UID: \"3288cc82-59a8-408e-8b0e-b5255882b4fb\") " pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.279900 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/3288cc82-59a8-408e-8b0e-b5255882b4fb-proxy-tls\") pod \"machine-config-daemon-2j8rt\" (UID: \"3288cc82-59a8-408e-8b0e-b5255882b4fb\") " pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.279913 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/f6c8d5bc-163f-401f-bdc5-4625112dced9-multus-conf-dir\") pod \"multus-wmn4g\" (UID: \"f6c8d5bc-163f-401f-bdc5-4625112dced9\") " pod="openshift-multus/multus-wmn4g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.279965 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/f6c8d5bc-163f-401f-bdc5-4625112dced9-multus-conf-dir\") pod \"multus-wmn4g\" (UID: \"f6c8d5bc-163f-401f-bdc5-4625112dced9\") " pod="openshift-multus/multus-wmn4g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.280241 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/f6c8d5bc-163f-401f-bdc5-4625112dced9-system-cni-dir\") pod \"multus-wmn4g\" (UID: \"f6c8d5bc-163f-401f-bdc5-4625112dced9\") " pod="openshift-multus/multus-wmn4g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.280277 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/f6c8d5bc-163f-401f-bdc5-4625112dced9-cnibin\") pod \"multus-wmn4g\" (UID: \"f6c8d5bc-163f-401f-bdc5-4625112dced9\") " pod="openshift-multus/multus-wmn4g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.280303 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/f6c8d5bc-163f-401f-bdc5-4625112dced9-hostroot\") pod \"multus-wmn4g\" (UID: \"f6c8d5bc-163f-401f-bdc5-4625112dced9\") " pod="openshift-multus/multus-wmn4g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.280324 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/94681624-a0a9-443a-9b4d-715182399740-system-cni-dir\") pod \"multus-additional-cni-plugins-jqvp2\" (UID: \"94681624-a0a9-443a-9b4d-715182399740\") " pod="openshift-multus/multus-additional-cni-plugins-jqvp2" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.280858 4783 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/3288cc82-59a8-408e-8b0e-b5255882b4fb-mcd-auth-proxy-config\") pod \"machine-config-daemon-2j8rt\" (UID: \"3288cc82-59a8-408e-8b0e-b5255882b4fb\") " pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.280980 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/f6c8d5bc-163f-401f-bdc5-4625112dced9-host-run-multus-certs\") pod \"multus-wmn4g\" (UID: \"f6c8d5bc-163f-401f-bdc5-4625112dced9\") " pod="openshift-multus/multus-wmn4g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.281253 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/f6c8d5bc-163f-401f-bdc5-4625112dced9-multus-cni-dir\") pod \"multus-wmn4g\" (UID: \"f6c8d5bc-163f-401f-bdc5-4625112dced9\") " pod="openshift-multus/multus-wmn4g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.281284 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/f6c8d5bc-163f-401f-bdc5-4625112dced9-host-var-lib-cni-bin\") pod \"multus-wmn4g\" (UID: \"f6c8d5bc-163f-401f-bdc5-4625112dced9\") " pod="openshift-multus/multus-wmn4g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.281306 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/f6c8d5bc-163f-401f-bdc5-4625112dced9-host-var-lib-kubelet\") pod \"multus-wmn4g\" (UID: \"f6c8d5bc-163f-401f-bdc5-4625112dced9\") " pod="openshift-multus/multus-wmn4g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.281329 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/f6c8d5bc-163f-401f-bdc5-4625112dced9-os-release\") pod \"multus-wmn4g\" (UID: \"f6c8d5bc-163f-401f-bdc5-4625112dced9\") " pod="openshift-multus/multus-wmn4g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.281400 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/f6c8d5bc-163f-401f-bdc5-4625112dced9-multus-socket-dir-parent\") pod \"multus-wmn4g\" (UID: \"f6c8d5bc-163f-401f-bdc5-4625112dced9\") " pod="openshift-multus/multus-wmn4g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.281392 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/94681624-a0a9-443a-9b4d-715182399740-os-release\") pod \"multus-additional-cni-plugins-jqvp2\" (UID: \"94681624-a0a9-443a-9b4d-715182399740\") " pod="openshift-multus/multus-additional-cni-plugins-jqvp2" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.281541 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/94681624-a0a9-443a-9b4d-715182399740-cnibin\") pod \"multus-additional-cni-plugins-jqvp2\" (UID: \"94681624-a0a9-443a-9b4d-715182399740\") " pod="openshift-multus/multus-additional-cni-plugins-jqvp2" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.281557 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/0cbc7f6d-232e-484d-9afc-7111e428762c-hosts-file\") pod \"node-resolver-ks7tf\" (UID: 
\"0cbc7f6d-232e-484d-9afc-7111e428762c\") " pod="openshift-dns/node-resolver-ks7tf" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.281596 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/f6c8d5bc-163f-401f-bdc5-4625112dced9-host-run-k8s-cni-cncf-io\") pod \"multus-wmn4g\" (UID: \"f6c8d5bc-163f-401f-bdc5-4625112dced9\") " pod="openshift-multus/multus-wmn4g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.281601 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/f6c8d5bc-163f-401f-bdc5-4625112dced9-etc-kubernetes\") pod \"multus-wmn4g\" (UID: \"f6c8d5bc-163f-401f-bdc5-4625112dced9\") " pod="openshift-multus/multus-wmn4g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.281623 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/f6c8d5bc-163f-401f-bdc5-4625112dced9-host-var-lib-cni-multus\") pod \"multus-wmn4g\" (UID: \"f6c8d5bc-163f-401f-bdc5-4625112dced9\") " pod="openshift-multus/multus-wmn4g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.281653 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/3288cc82-59a8-408e-8b0e-b5255882b4fb-rootfs\") pod \"machine-config-daemon-2j8rt\" (UID: \"3288cc82-59a8-408e-8b0e-b5255882b4fb\") " pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.281651 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/f6c8d5bc-163f-401f-bdc5-4625112dced9-host-run-netns\") pod \"multus-wmn4g\" (UID: \"f6c8d5bc-163f-401f-bdc5-4625112dced9\") " pod="openshift-multus/multus-wmn4g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.281808 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/94681624-a0a9-443a-9b4d-715182399740-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-jqvp2\" (UID: \"94681624-a0a9-443a-9b4d-715182399740\") " pod="openshift-multus/multus-additional-cni-plugins-jqvp2" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.282007 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/94681624-a0a9-443a-9b4d-715182399740-cni-binary-copy\") pod \"multus-additional-cni-plugins-jqvp2\" (UID: \"94681624-a0a9-443a-9b4d-715182399740\") " pod="openshift-multus/multus-additional-cni-plugins-jqvp2" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.282491 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/f6c8d5bc-163f-401f-bdc5-4625112dced9-multus-daemon-config\") pod \"multus-wmn4g\" (UID: \"f6c8d5bc-163f-401f-bdc5-4625112dced9\") " pod="openshift-multus/multus-wmn4g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.282516 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/94681624-a0a9-443a-9b4d-715182399740-tuning-conf-dir\") pod \"multus-additional-cni-plugins-jqvp2\" (UID: \"94681624-a0a9-443a-9b4d-715182399740\") " pod="openshift-multus/multus-additional-cni-plugins-jqvp2" Oct 02 10:53:10 crc 
kubenswrapper[4783]: I1002 10:53:10.282582 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/f6c8d5bc-163f-401f-bdc5-4625112dced9-cni-binary-copy\") pod \"multus-wmn4g\" (UID: \"f6c8d5bc-163f-401f-bdc5-4625112dced9\") " pod="openshift-multus/multus-wmn4g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.286912 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/3288cc82-59a8-408e-8b0e-b5255882b4fb-proxy-tls\") pod \"machine-config-daemon-2j8rt\" (UID: \"3288cc82-59a8-408e-8b0e-b5255882b4fb\") " pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.299391 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a3fab4917d2aaaba6208c2c5c4dd19b845e8d52e5e350566d8a78cfd581604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:10Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.303465 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rplhm\" (UniqueName: \"kubernetes.io/projected/3288cc82-59a8-408e-8b0e-b5255882b4fb-kube-api-access-rplhm\") pod \"machine-config-daemon-2j8rt\" (UID: \"3288cc82-59a8-408e-8b0e-b5255882b4fb\") " pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.309879 4783 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l6gs6\" (UniqueName: \"kubernetes.io/projected/f6c8d5bc-163f-401f-bdc5-4625112dced9-kube-api-access-l6gs6\") pod \"multus-wmn4g\" (UID: \"f6c8d5bc-163f-401f-bdc5-4625112dced9\") " pod="openshift-multus/multus-wmn4g"
Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.309884 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h8zq9\" (UniqueName: \"kubernetes.io/projected/94681624-a0a9-443a-9b4d-715182399740-kube-api-access-h8zq9\") pod \"multus-additional-cni-plugins-jqvp2\" (UID: \"94681624-a0a9-443a-9b4d-715182399740\") " pod="openshift-multus/multus-additional-cni-plugins-jqvp2"
Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.321098 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3288cc82-59a8-408e-8b0e-b5255882b4fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2j8rt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:10Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.339791 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fj8jd\" (UniqueName: \"kubernetes.io/projected/0cbc7f6d-232e-484d-9afc-7111e428762c-kube-api-access-fj8jd\") pod \"node-resolver-ks7tf\" (UID: \"0cbc7f6d-232e-484d-9afc-7111e428762c\") " pod="openshift-dns/node-resolver-ks7tf" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.362546 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9d7cf9d-8522-4a62-aaf3-1579864e50f3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b2e414bb5345d3aae7e32d912acf6d7ede59c1f47e1474caf4736663c733d2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8d3ec78a66bfaf15222ed50fe81f042cbf0b9aa5ecc8fb175b0efc8bc1bccbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a03f92fd7d8f5e7193682a5f1db78e947bea4135976b5db6d87842634f0c4d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edada94eb053469810a747a5d1bf8fe2c2da09
5e5c944dd2cd5ab7846faf5ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebdc8ed0cab8049a9f6220e6069291fd7a0e6c2b97e0442e3221549cbcc889f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:10Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.374982 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2054147b-696c-471a-9602-db747325cae2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d954f0dc3572c180556ccde9a4c429359575e5418fb786fc5df6987f8c326075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e521d7072220c202109a67fe59ebebe0db8bd877d02d9805324922eb3bf45d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb33b77ecaf413bc6e4aeb0f4358b12cd0f3fa6e01602673a518f5651e4bac6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-02T10:53:07Z\\\",\\\"message\\\":\\\"file observer\\\\nW1002 10:53:07.776642 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1002 10:53:07.776950 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1002 10:53:07.778492 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-420939118/tls.crt::/tmp/serving-cert-420939118/tls.key\\\\\\\"\\\\nI1002 10:53:07.918100 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1002 10:53:07.922528 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1002 10:53:07.922551 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1002 10:53:07.922578 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1002 10:53:07.922585 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1002 10:53:07.930960 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1002 10:53:07.930984 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1002 10:53:07.930999 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931026 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931031 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1002 10:53:07.931036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1002 10:53:07.931039 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1002 10:53:07.931042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1002 10:53:07.933737 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://85188b6d7b5901c957d4ea576d5baab74589a2300f15d6210915fd78f6171348\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:10Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.386551 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"acf018d3-a71b-450d-88ef-8f991d5d2219\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf7913b5261e05e8bbfb22a81635ff274a8a28d550d004a5349a9ed84ba5c8b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcc4f647d50346cff07c267ebe645576d032b01e443eaabe4b9aa73b91966e4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdd2ca18244e601967c7e10b1f8f1f02fcf33bfee712d78f7715909c95581b9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0e111ce373594ac0f4bf89b06fd2008eb6554e566575b88e4a4f6beaaa29d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:10Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.401983 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:10Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.418790 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2054147b-696c-471a-9602-db747325cae2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d954f0dc3572c180556ccde9a4c429359575e5418fb786fc5df6987f8c326075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e521d7072220c202109a67fe59ebebe0db8bd877d02d9805324922eb3bf45d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb33b77ecaf413bc6e4aeb0f4358b12cd0f3fa6e01602673a518f5651e4bac6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-02T10:53:07Z\\\",\\\"message\\\":\\\"file observer\\\\nW1002 10:53:07.776642 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1002 10:53:07.776950 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1002 10:53:07.778492 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-420939118/tls.crt::/tmp/serving-cert-420939118/tls.key\\\\\\\"\\\\nI1002 10:53:07.918100 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1002 10:53:07.922528 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1002 10:53:07.922551 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1002 10:53:07.922578 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1002 10:53:07.922585 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1002 10:53:07.930960 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1002 10:53:07.930984 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1002 10:53:07.930999 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931026 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931031 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1002 10:53:07.931036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1002 10:53:07.931039 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1002 10:53:07.931042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1002 10:53:07.933737 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://85188b6d7b5901c957d4ea576d5baab74589a2300f15d6210915fd78f6171348\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:10Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.446138 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"acf018d3-a71b-450d-88ef-8f991d5d2219\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf7913b5261e05e8bbfb22a81635ff274a8a28d550d004a5349a9ed84ba5c8b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcc4f647d50346cff07c267ebe645576d032b01e443eaabe4b9aa73b91966e4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdd2ca18244e601967c7e10b1f8f1f02fcf33bfee712d78f7715909c95581b9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0e111ce373594ac0f4bf89b06fd2008eb6554e566575b88e4a4f6beaaa29d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:10Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.462312 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fd4a14eb803afc1b1efecd7c9b8560546554be16de62e8db79752bce61a47dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50292888ad1f8cb9b64dcecfa47015e88b7a560123e50d4a3bf1550b87cfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:10Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.465092 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.473401 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3288cc82-59a8-408e-8b0e-b5255882b4fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2j8rt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:10Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:10 crc kubenswrapper[4783]: W1002 10:53:10.475150 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3288cc82_59a8_408e_8b0e_b5255882b4fb.slice/crio-eae73648cb3a25002a9b5fa77c8bb161825fa534ca68e4af27b2ebd59bbfd951 WatchSource:0}: Error finding container eae73648cb3a25002a9b5fa77c8bb161825fa534ca68e4af27b2ebd59bbfd951: Status 404 returned error can't find the container with id eae73648cb3a25002a9b5fa77c8bb161825fa534ca68e4af27b2ebd59bbfd951 Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.475594 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-wmn4g" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.481779 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-ks7tf" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.484994 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-jqvp2" Oct 02 10:53:10 crc kubenswrapper[4783]: W1002 10:53:10.490178 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf6c8d5bc_163f_401f_bdc5_4625112dced9.slice/crio-e36b9ccf3eb37d0ad6ff5bc568440d12138781eefedc29226cc73914eed02d45 WatchSource:0}: Error finding container e36b9ccf3eb37d0ad6ff5bc568440d12138781eefedc29226cc73914eed02d45: Status 404 returned error can't find the container with id e36b9ccf3eb37d0ad6ff5bc568440d12138781eefedc29226cc73914eed02d45 Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.518755 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9d7cf9d-8522-4a62-aaf3-1579864e50f3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b2e414bb5345d3aae7e32d912acf6d7ede59c1f47e1474caf4736663c733d2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8d3ec78a66bfaf15222ed50fe81f042cbf0b9aa5ecc8fb175b0efc8bc1bccbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a03
f92fd7d8f5e7193682a5f1db78e947bea4135976b5db6d87842634f0c4d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edada94eb053469810a747a5d1bf8fe2c2da095e5c944dd2cd5ab7846faf5ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebdc8ed0cab8049a9f6220e6069291fd7a0e6c2b97e0442e3221549cbcc889f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1d
ae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:10Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.544200 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:53:10 crc kubenswrapper[4783]: E1002 10:53:10.544314 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.544368 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 02 10:53:10 crc kubenswrapper[4783]: E1002 10:53:10.544405 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.544452 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 02 10:53:10 crc kubenswrapper[4783]: E1002 10:53:10.544489 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.579211 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-qmd84"] Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.580089 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.584744 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.584947 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.585071 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.585210 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.585301 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.585398 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.585577 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:10Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.588911 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.635257 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:10Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.664138 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:10Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.684302 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-etc-openvswitch\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.684364 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/095cdcdf-1ea0-40da-871a-1223c6737377-ovnkube-script-lib\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.684390 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-systemd-units\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.684438 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-host-kubelet\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.684462 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-run-ovn\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.684482 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-run-systemd\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 
10:53:10.684521 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-var-lib-openvswitch\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.684544 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-host-cni-bin\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.684562 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/095cdcdf-1ea0-40da-871a-1223c6737377-env-overrides\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.684606 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/095cdcdf-1ea0-40da-871a-1223c6737377-ovnkube-config\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.684632 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.684675 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-host-slash\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.684710 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-run-openvswitch\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.684730 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-host-run-ovn-kubernetes\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.684779 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-host-run-netns\") pod \"ovnkube-node-qmd84\" (UID: 
\"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.684801 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-log-socket\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.684846 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-node-log\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.684869 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/095cdcdf-1ea0-40da-871a-1223c6737377-ovn-node-metrics-cert\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.684890 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hhvtk\" (UniqueName: \"kubernetes.io/projected/095cdcdf-1ea0-40da-871a-1223c6737377-kube-api-access-hhvtk\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.684939 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-host-cni-netd\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.702451 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wmn4g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6c8d5bc-163f-401f-bdc5-4625112dced9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6gs6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wmn4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:10Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.713446 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-wmn4g" event={"ID":"f6c8d5bc-163f-401f-bdc5-4625112dced9","Type":"ContainerStarted","Data":"66ec2f190b5004026dfc36e595ca483d08c1dc07796317d28f9dfd252e391fe7"} Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.713530 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-wmn4g" event={"ID":"f6c8d5bc-163f-401f-bdc5-4625112dced9","Type":"ContainerStarted","Data":"e36b9ccf3eb37d0ad6ff5bc568440d12138781eefedc29226cc73914eed02d45"} Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.720636 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ks7tf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cbc7f6d-232e-484d-9afc-7111e428762c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fj8jd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ks7tf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:10Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.721053 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" event={"ID":"3288cc82-59a8-408e-8b0e-b5255882b4fb","Type":"ContainerStarted","Data":"eae73648cb3a25002a9b5fa77c8bb161825fa534ca68e4af27b2ebd59bbfd951"} Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.722041 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-jqvp2" event={"ID":"94681624-a0a9-443a-9b4d-715182399740","Type":"ContainerStarted","Data":"6661545a9659562869e5288ce482913f4e9f66a405ceafeeede46194f1da0324"} Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.723126 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-ks7tf" event={"ID":"0cbc7f6d-232e-484d-9afc-7111e428762c","Type":"ContainerStarted","Data":"413010c3143a20aa0dca9eb335694dd811942c6e85520ace8b0e20a900d69819"} Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.736152 4783 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/multus-additional-cni-plugins-jqvp2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94681624-a0a9-443a-9b4d-715182399740\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\
\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jqvp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:10Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.771516 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a3fab4917d2aaaba6208c2c5c4dd19b845e8d52e5e350566d8a78cfd581604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:10Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.785489 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-host-run-netns\") pod \"ovnkube-node-qmd84\" (UID: 
\"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.785520 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-log-socket\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.785545 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-node-log\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.785560 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/095cdcdf-1ea0-40da-871a-1223c6737377-ovn-node-metrics-cert\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.785582 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hhvtk\" (UniqueName: \"kubernetes.io/projected/095cdcdf-1ea0-40da-871a-1223c6737377-kube-api-access-hhvtk\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.785605 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-host-cni-netd\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.785628 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-etc-openvswitch\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.785643 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-systemd-units\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.785656 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/095cdcdf-1ea0-40da-871a-1223c6737377-ovnkube-script-lib\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.785673 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-host-kubelet\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.785687 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-run-ovn\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.785703 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-run-systemd\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.785718 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-var-lib-openvswitch\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.785760 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-host-cni-bin\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.785775 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/095cdcdf-1ea0-40da-871a-1223c6737377-env-overrides\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.785801 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/095cdcdf-1ea0-40da-871a-1223c6737377-ovnkube-config\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.785819 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.785834 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-host-slash\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.785855 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-run-openvswitch\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 
crc kubenswrapper[4783]: I1002 10:53:10.785870 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-host-run-ovn-kubernetes\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.785924 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-host-run-ovn-kubernetes\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.785960 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-host-run-netns\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.785983 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-log-socket\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.786004 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-node-log\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.786884 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-run-systemd\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.787182 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-host-cni-netd\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.787219 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-etc-openvswitch\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.787242 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-systemd-units\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.787847 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" 
(UniqueName: \"kubernetes.io/configmap/095cdcdf-1ea0-40da-871a-1223c6737377-ovnkube-script-lib\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.787888 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-host-kubelet\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.787912 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-run-ovn\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.788284 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/095cdcdf-1ea0-40da-871a-1223c6737377-ovnkube-config\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.788322 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-var-lib-openvswitch\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.788345 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-host-cni-bin\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.788654 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/095cdcdf-1ea0-40da-871a-1223c6737377-env-overrides\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.788693 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-host-slash\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.788718 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.788741 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-run-openvswitch\") pod \"ovnkube-node-qmd84\" (UID: 
\"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.799474 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/095cdcdf-1ea0-40da-871a-1223c6737377-ovn-node-metrics-cert\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.815912 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9d7cf9d-8522-4a62-aaf3-1579864e50f3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b2e414bb5345d3aae7e32d912acf6d7ede59c1f47e1474caf4736663c733d2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8d3ec78a66bfaf15222ed50fe81f042cbf0b9aa5ecc8fb175b0efc8bc1bccbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a03f92fd7d8f5e7193682a5f1db78e947bea4135976b5db6d87842634f0c4d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3
723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edada94eb053469810a747a5d1bf8fe2c2da095e5c944dd2cd5ab7846faf5ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebdc8ed0cab8049a9f6220e6069291fd7a0e6c2b97e0442e3221549cbcc889f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731
ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:10Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.821924 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hhvtk\" (UniqueName: \"kubernetes.io/projected/095cdcdf-1ea0-40da-871a-1223c6737377-kube-api-access-hhvtk\") pod \"ovnkube-node-qmd84\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.836367 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2054147b-696c-471a-9602-db747325cae2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d954f0dc3572c180556ccde9a4c429359575e5418fb786fc5df6987f8c326075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e521d7072220c202109a67fe59ebebe0db8bd877d02d9805324922eb3bf45d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb33b77ecaf413bc6e4aeb0f4358b12cd0f3fa6e01602673a518f5651e4bac6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-02T10:53:07Z\\\",\\\"message\\\":\\\"file observer\\\\nW1002 10:53:07.776642 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1002 10:53:07.776950 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1002 10:53:07.778492 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-420939118/tls.crt::/tmp/serving-cert-420939118/tls.key\\\\\\\"\\\\nI1002 10:53:07.918100 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1002 10:53:07.922528 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1002 10:53:07.922551 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1002 10:53:07.922578 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1002 10:53:07.922585 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1002 10:53:07.930960 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1002 10:53:07.930984 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1002 10:53:07.930999 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931026 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931031 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1002 10:53:07.931036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1002 10:53:07.931039 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1002 10:53:07.931042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1002 10:53:07.933737 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://85188b6d7b5901c957d4ea576d5baab74589a2300f15d6210915fd78f6171348\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:10Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.863697 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"acf018d3-a71b-450d-88ef-8f991d5d2219\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf7913b5261e05e8bbfb22a81635ff274a8a28d550d004a5349a9ed84ba5c8b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcc4f647d50346cff07c267ebe645576d032b01e443eaabe4b9aa73b91966e4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdd2ca18244e601967c7e10b1f8f1f02fcf33bfee712d78f7715909c95581b9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0e111ce373594ac0f4bf89b06fd2008eb6554e566575b88e4a4f6beaaa29d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:10Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.882104 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fd4a14eb803afc1b1efecd7c9b8560546554be16de62e8db79752bce61a47dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50292888ad1f8cb9b64dcecfa47015e88b7a560123e50d4a3bf1550b87cfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:10Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.898237 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3288cc82-59a8-408e-8b0e-b5255882b4fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2j8rt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:10Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.903677 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.911919 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:10Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.935156 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:10Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.950244 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:10Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.968685 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wmn4g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6c8d5bc-163f-401f-bdc5-4625112dced9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ec2f190b5004026dfc36e595ca483d08c1dc07796317d28f9dfd252e391fe7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/v
ar/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6gs6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wmn4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:10Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.977142 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ks7tf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cbc7f6d-232e-484d-9afc-7111e428762c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fj8jd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ks7tf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:10Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:10 crc kubenswrapper[4783]: I1002 10:53:10.995876 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a3fab4917d2aaaba6208c2c5c4dd19b845e8d52e5e350566d8a78cfd581604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:10Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:11 crc kubenswrapper[4783]: I1002 10:53:11.010981 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jqvp2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94681624-a0a9-443a-9b4d-715182399740\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec
8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"
quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jqvp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:11Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:11 crc kubenswrapper[4783]: I1002 10:53:11.031556 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:11Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:11 crc kubenswrapper[4783]: I1002 10:53:11.051944 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"095cdcdf-1ea0-40da-871a-1223c6737377\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qmd84\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:11Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:11 crc kubenswrapper[4783]: I1002 10:53:11.726812 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" event={"ID":"3288cc82-59a8-408e-8b0e-b5255882b4fb","Type":"ContainerStarted","Data":"65d5af548f6048ec95699a1b7175ec8efea96441c335805d704b414eedac00d5"} Oct 02 10:53:11 crc kubenswrapper[4783]: I1002 
10:53:11.726859 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" event={"ID":"3288cc82-59a8-408e-8b0e-b5255882b4fb","Type":"ContainerStarted","Data":"d07b963cb479fc49309f30652bcc2566fd49cd3c7fa53fc79aaf82a9a4b27eab"} Oct 02 10:53:11 crc kubenswrapper[4783]: I1002 10:53:11.728577 4783 generic.go:334] "Generic (PLEG): container finished" podID="94681624-a0a9-443a-9b4d-715182399740" containerID="5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5" exitCode=0 Oct 02 10:53:11 crc kubenswrapper[4783]: I1002 10:53:11.728644 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-jqvp2" event={"ID":"94681624-a0a9-443a-9b4d-715182399740","Type":"ContainerDied","Data":"5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5"} Oct 02 10:53:11 crc kubenswrapper[4783]: I1002 10:53:11.730193 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-ks7tf" event={"ID":"0cbc7f6d-232e-484d-9afc-7111e428762c","Type":"ContainerStarted","Data":"160ecc09cabb40cb5867a449f2bb1707da2367f5b10cc4f53276b372b2641bc9"} Oct 02 10:53:11 crc kubenswrapper[4783]: I1002 10:53:11.731560 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"8dec06359fbdd507aff2ffef8bd1114892cd9e4732b0f21139fd60b5765bc074"} Oct 02 10:53:11 crc kubenswrapper[4783]: I1002 10:53:11.732760 4783 generic.go:334] "Generic (PLEG): container finished" podID="095cdcdf-1ea0-40da-871a-1223c6737377" containerID="3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54" exitCode=0 Oct 02 10:53:11 crc kubenswrapper[4783]: I1002 10:53:11.732786 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" event={"ID":"095cdcdf-1ea0-40da-871a-1223c6737377","Type":"ContainerDied","Data":"3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54"} Oct 02 10:53:11 crc kubenswrapper[4783]: I1002 10:53:11.732799 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" event={"ID":"095cdcdf-1ea0-40da-871a-1223c6737377","Type":"ContainerStarted","Data":"ed4b10c90beeaf12a5f099fbd95c1695e285d247972b79106d428ace9bea2d86"} Oct 02 10:53:11 crc kubenswrapper[4783]: I1002 10:53:11.762836 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9d7cf9d-8522-4a62-aaf3-1579864e50f3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b2e414bb5345d3aae7e32d912acf6d7ede59c1f47e1474caf4736663c733d2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8d3ec78a66bfaf15222ed50fe81f042cbf0b9aa5ecc8fb175b0efc8bc1bccbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a03f92fd7d8f5e7193682a5f1db78e947bea4135976b5db6d87842634f0c4d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edada94eb053469810a747a5d1bf8fe2c2da09
5e5c944dd2cd5ab7846faf5ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebdc8ed0cab8049a9f6220e6069291fd7a0e6c2b97e0442e3221549cbcc889f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:11Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:11 crc kubenswrapper[4783]: I1002 10:53:11.780859 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2054147b-696c-471a-9602-db747325cae2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d954f0dc3572c180556ccde9a4c429359575e5418fb786fc5df6987f8c326075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e521d7072220c202109a67fe59ebebe0db8bd877d02d9805324922eb3bf45d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb33b77ecaf413bc6e4aeb0f4358b12cd0f3fa6e01602673a518f5651e4bac6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-02T10:53:07Z\\\",\\\"message\\\":\\\"file observer\\\\nW1002 10:53:07.776642 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1002 10:53:07.776950 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1002 10:53:07.778492 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-420939118/tls.crt::/tmp/serving-cert-420939118/tls.key\\\\\\\"\\\\nI1002 10:53:07.918100 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1002 10:53:07.922528 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1002 10:53:07.922551 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1002 10:53:07.922578 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1002 10:53:07.922585 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1002 10:53:07.930960 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1002 10:53:07.930984 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1002 10:53:07.930999 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931026 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931031 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1002 10:53:07.931036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1002 10:53:07.931039 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1002 10:53:07.931042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1002 10:53:07.933737 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://85188b6d7b5901c957d4ea576d5baab74589a2300f15d6210915fd78f6171348\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:11Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:11 crc kubenswrapper[4783]: I1002 10:53:11.803981 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"acf018d3-a71b-450d-88ef-8f991d5d2219\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf7913b5261e05e8bbfb22a81635ff274a8a28d550d004a5349a9ed84ba5c8b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcc4f647d50346cff07c267ebe645576d032b01e443eaabe4b9aa73b91966e4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdd2ca18244e601967c7e10b1f8f1f02fcf33bfee712d78f7715909c95581b9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0e111ce373594ac0f4bf89b06fd2008eb6554e566575b88e4a4f6beaaa29d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:11Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:11 crc kubenswrapper[4783]: I1002 10:53:11.815472 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fd4a14eb803afc1b1efecd7c9b8560546554be16de62e8db79752bce61a47dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50292888ad1f8cb9b64dcecfa47015e88b7a560123e50d4a3bf1550b87cfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:11Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:11 crc kubenswrapper[4783]: I1002 10:53:11.827287 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3288cc82-59a8-408e-8b0e-b5255882b4fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d07b963cb479fc49309f30652bcc2566fd49cd3c7fa53fc79aaf82a9a4b27eab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65d5af548f6048ec95699a1b7175ec8efea96441c335805d704b414eedac00d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha
256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2j8rt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:11Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:11 crc kubenswrapper[4783]: I1002 10:53:11.843020 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:11Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:11 crc kubenswrapper[4783]: I1002 10:53:11.854285 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:11Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:11 crc kubenswrapper[4783]: I1002 10:53:11.865505 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:11Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:11 crc kubenswrapper[4783]: I1002 10:53:11.875869 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wmn4g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6c8d5bc-163f-401f-bdc5-4625112dced9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ec2f190b5004026dfc36e595ca483d08c1dc07796317d28f9dfd252e391fe7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/v
ar/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6gs6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wmn4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:11Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:11 crc kubenswrapper[4783]: I1002 10:53:11.892173 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ks7tf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cbc7f6d-232e-484d-9afc-7111e428762c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fj8jd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ks7tf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:11Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:11 crc kubenswrapper[4783]: I1002 10:53:11.906658 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a3fab4917d2aaaba6208c2c5c4dd19b845e8d52e5e350566d8a78cfd581604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:11Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:11 crc kubenswrapper[4783]: I1002 10:53:11.921698 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jqvp2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94681624-a0a9-443a-9b4d-715182399740\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec
8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"
quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jqvp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:11Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:11 crc kubenswrapper[4783]: I1002 10:53:11.933326 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:11Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:11 crc kubenswrapper[4783]: I1002 10:53:11.949972 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"095cdcdf-1ea0-40da-871a-1223c6737377\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qmd84\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:11Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:11 crc kubenswrapper[4783]: I1002 10:53:11.964540 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2054147b-696c-471a-9602-db747325cae2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d954f0dc3572c180556ccde9a4c429359575e5418fb786fc5df6987f8c326075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e521d7072220c202109a67fe59ebebe0db8bd877d02d9805324922eb3bf45d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb33b77ecaf413bc6e4aeb0f4358b12cd0f3fa6e01602673a518f5651e4bac6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-02T10:53:07Z\\\",\\\"message\\\":\\\"file observer\\\\nW1002 10:53:07.776642 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1002 10:53:07.776950 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1002 10:53:07.778492 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-420939118/tls.crt::/tmp/serving-cert-420939118/tls.key\\\\\\\"\\\\nI1002 10:53:07.918100 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1002 10:53:07.922528 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1002 10:53:07.922551 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1002 10:53:07.922578 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1002 10:53:07.922585 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1002 10:53:07.930960 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1002 10:53:07.930984 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1002 10:53:07.930999 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931026 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931031 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1002 10:53:07.931036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1002 10:53:07.931039 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1002 10:53:07.931042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1002 10:53:07.933737 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://85188b6d7b5901c957d4ea576d5baab74589a2300f15d6210915fd78f6171348\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:11Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:11 crc kubenswrapper[4783]: I1002 10:53:11.985742 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"acf018d3-a71b-450d-88ef-8f991d5d2219\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf7913b5261e05e8bbfb22a81635ff274a8a28d550d004a5349a9ed84ba5c8b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcc4f647d50346cff07c267ebe645576d032b01e443eaabe4b9aa73b91966e4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdd2ca18244e601967c7e10b1f8f1f02fcf33bfee712d78f7715909c95581b9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0e111ce373594ac0f4bf89b06fd2008eb6554e566575b88e4a4f6beaaa29d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:11Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:11 crc kubenswrapper[4783]: I1002 10:53:11.997402 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fd4a14eb803afc1b1efecd7c9b8560546554be16de62e8db79752bce61a47dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50292888ad1f8cb9b64dcecfa47015e88b7a560123e50d4a3bf1550b87cfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:11Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.015363 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3288cc82-59a8-408e-8b0e-b5255882b4fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d07b963cb479fc49309f30652bcc2566fd49cd3c7fa53fc79aaf82a9a4b27eab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65d5af548f6048ec95699a1b7175ec8efea96441c335805d704b414eedac00d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha
256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2j8rt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:12Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.036132 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9d7cf9d-8522-4a62-aaf3-1579864e50f3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b2e414bb5345d3aae7e32d912acf6d7ede59c1f47e1474caf4736663c733d2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8d3ec78a66bfaf15222ed50fe81f042cbf0b9aa5ecc8fb175b0efc8bc1bccbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c
6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a03f92fd7d8f5e7193682a5f1db78e947bea4135976b5db6d87842634f0c4d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edada94eb053469810a747a5d1bf8fe2c2da095e5c944dd2cd5ab7846faf5ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebdc8ed0cab8049a9f6220e6069291fd7a0e6c2b97e0442e3221549cbcc889f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"
,\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:12Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.053806 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:12Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.067327 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:12Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.090926 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dec06359fbdd507aff2ffef8bd1114892cd9e4732b0f21139fd60b5765bc074\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:12Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.095558 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.095648 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.095713 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:53:12 crc kubenswrapper[4783]: E1002 10:53:12.095805 4783 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Oct 02 10:53:12 crc kubenswrapper[4783]: E1002 10:53:12.095851 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-02 10:53:16.095836882 +0000 UTC m=+29.412031143 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Oct 02 10:53:12 crc kubenswrapper[4783]: E1002 10:53:12.096142 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:53:16.09613191 +0000 UTC m=+29.412326171 (durationBeforeRetry 4s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:53:12 crc kubenswrapper[4783]: E1002 10:53:12.096209 4783 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 02 10:53:12 crc kubenswrapper[4783]: E1002 10:53:12.096239 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-02 10:53:16.096229482 +0000 UTC m=+29.412423743 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.107814 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wmn4g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6c8d5bc-163f-401f-bdc5-4625112dced9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ec2f190b5004026dfc36e595ca483d08c1dc07796317d28f9dfd252e391fe7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\"
:\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6gs6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wmn4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:12Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.122614 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ks7tf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cbc7f6d-232e-484d-9afc-7111e428762c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160ecc09cabb40cb5867a449f2bb1707da2367f5b10cc4f53276b372b2641bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fj8jd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ks7tf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:12Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.138600 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jqvp2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94681624-a0a9-443a-9b4d-715182399740\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy 
whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\
\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"po
dIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jqvp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:12Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.157047 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a3fab4917d2aaaba6208c2c5c4dd19b845e8d52e5e350566d8a78cfd581604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:12Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.169447 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:12Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.186388 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"095cdcdf-1ea0-40da-871a-1223c6737377\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node 
kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni
/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qmd84\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not 
yet valid: current time 2025-10-02T10:53:12Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.196717 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.196760 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 02 10:53:12 crc kubenswrapper[4783]: E1002 10:53:12.196895 4783 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 02 10:53:12 crc kubenswrapper[4783]: E1002 10:53:12.196909 4783 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 02 10:53:12 crc kubenswrapper[4783]: E1002 10:53:12.196919 4783 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 02 10:53:12 crc kubenswrapper[4783]: E1002 10:53:12.196918 4783 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 02 10:53:12 crc kubenswrapper[4783]: E1002 10:53:12.196942 4783 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 02 10:53:12 crc kubenswrapper[4783]: E1002 10:53:12.196952 4783 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 02 10:53:12 crc kubenswrapper[4783]: E1002 10:53:12.196972 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-10-02 10:53:16.196959014 +0000 UTC m=+29.513153265 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 02 10:53:12 crc kubenswrapper[4783]: E1002 10:53:12.196994 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-10-02 10:53:16.196979844 +0000 UTC m=+29.513174105 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.514946 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-vxpvq"] Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.516623 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-vxpvq" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.520720 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.520899 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.520959 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.521057 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.536341 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ks7tf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cbc7f6d-232e-484d-9afc-7111e428762c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160ecc09cabb40cb5867a449f2bb1707da2367f5b10cc4f53276b372b2641bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fj8jd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ks7tf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:12Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.544313 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.544358 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.544313 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 02 10:53:12 crc kubenswrapper[4783]: E1002 10:53:12.544456 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 02 10:53:12 crc kubenswrapper[4783]: E1002 10:53:12.544552 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 02 10:53:12 crc kubenswrapper[4783]: E1002 10:53:12.544623 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.554306 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:12Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.567354 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:12Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.578248 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dec06359fbdd507aff2ffef8bd1114892cd9e4732b0f21139fd60b5765bc074\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:12Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.589176 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wmn4g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6c8d5bc-163f-401f-bdc5-4625112dced9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ec2f190b5004026dfc36e595ca483d08c1dc07796317d28f9dfd252e391fe7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6gs6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wmn4g\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:12Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.600177 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0781b010-f65f-4e49-9d78-48eda11666fb-host\") pod \"node-ca-vxpvq\" (UID: \"0781b010-f65f-4e49-9d78-48eda11666fb\") " pod="openshift-image-registry/node-ca-vxpvq" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.600218 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/0781b010-f65f-4e49-9d78-48eda11666fb-serviceca\") pod \"node-ca-vxpvq\" (UID: \"0781b010-f65f-4e49-9d78-48eda11666fb\") " pod="openshift-image-registry/node-ca-vxpvq" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.600339 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wbk2x\" (UniqueName: \"kubernetes.io/projected/0781b010-f65f-4e49-9d78-48eda11666fb-kube-api-access-wbk2x\") pod \"node-ca-vxpvq\" (UID: \"0781b010-f65f-4e49-9d78-48eda11666fb\") " pod="openshift-image-registry/node-ca-vxpvq" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.605609 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a3fab4917d2aaaba6208c2c5c4dd19b845e8d52e5e350566d8a78cfd581604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:12Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.622781 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jqvp2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94681624-a0a9-443a-9b4d-715182399740\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPat
h\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"m
ountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jqvp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:12Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.637465 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:12Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.659597 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"095cdcdf-1ea0-40da-871a-1223c6737377\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qmd84\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:12Z 
is after 2025-08-24T17:21:41Z" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.669832 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vxpvq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0781b010-f65f-4e49-9d78-48eda11666fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbk2x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:12Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vxpvq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:12Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.688126 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9d7cf9d-8522-4a62-aaf3-1579864e50f3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b2e414bb5345d3aae7e32d912acf6d7ede59c1f47e1474caf4736663c733d2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8d3ec78a66bfaf15222ed50fe81f042cbf0b9aa5ecc8fb175b0efc8bc1bccbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a03f92fd7d8f5e7193682a5f1db78e947bea4135976b5db6d87842634f0c4d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edada94eb053469810a747a5d1bf8fe2c2da09
5e5c944dd2cd5ab7846faf5ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebdc8ed0cab8049a9f6220e6069291fd7a0e6c2b97e0442e3221549cbcc889f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:12Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.700891 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0781b010-f65f-4e49-9d78-48eda11666fb-host\") pod \"node-ca-vxpvq\" (UID: \"0781b010-f65f-4e49-9d78-48eda11666fb\") " pod="openshift-image-registry/node-ca-vxpvq" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.700947 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/0781b010-f65f-4e49-9d78-48eda11666fb-serviceca\") pod \"node-ca-vxpvq\" (UID: \"0781b010-f65f-4e49-9d78-48eda11666fb\") " pod="openshift-image-registry/node-ca-vxpvq" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.700988 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wbk2x\" (UniqueName: \"kubernetes.io/projected/0781b010-f65f-4e49-9d78-48eda11666fb-kube-api-access-wbk2x\") pod \"node-ca-vxpvq\" (UID: \"0781b010-f65f-4e49-9d78-48eda11666fb\") " pod="openshift-image-registry/node-ca-vxpvq" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.701346 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/0781b010-f65f-4e49-9d78-48eda11666fb-host\") pod \"node-ca-vxpvq\" (UID: \"0781b010-f65f-4e49-9d78-48eda11666fb\") " pod="openshift-image-registry/node-ca-vxpvq" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.703058 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/0781b010-f65f-4e49-9d78-48eda11666fb-serviceca\") pod \"node-ca-vxpvq\" (UID: \"0781b010-f65f-4e49-9d78-48eda11666fb\") " pod="openshift-image-registry/node-ca-vxpvq" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.707119 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2054147b-696c-471a-9602-db747325cae2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d954f0dc3572c180556ccde9a4c429359575e5418fb786fc5df6987f8c326075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e521d7072220c202109a67fe59ebebe0db8bd877d02d9805324922eb3bf45d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb33b77ecaf413bc6e4aeb0f4358b12cd0f3fa6e01602673a518f5651e4bac6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-02T10:53:07Z\\\",\\\"message\\\":\\\"file observer\\\\nW1002 10:53:07.776642 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1002 10:53:07.776950 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1002 10:53:07.778492 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-420939118/tls.crt::/tmp/serving-cert-420939118/tls.key\\\\\\\"\\\\nI1002 10:53:07.918100 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1002 10:53:07.922528 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1002 10:53:07.922551 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1002 10:53:07.922578 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1002 10:53:07.922585 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1002 10:53:07.930960 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1002 10:53:07.930984 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1002 10:53:07.930999 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931026 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931031 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1002 10:53:07.931036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1002 10:53:07.931039 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1002 10:53:07.931042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1002 10:53:07.933737 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://85188b6d7b5901c957d4ea576d5baab74589a2300f15d6210915fd78f6171348\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:12Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.722701 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"acf018d3-a71b-450d-88ef-8f991d5d2219\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf7913b5261e05e8bbfb22a81635ff274a8a28d550d004a5349a9ed84ba5c8b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcc4f647d50346cff07c267ebe645576d032b01e443eaabe4b9aa73b91966e4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdd2ca18244e601967c7e10b1f8f1f02fcf33bfee712d78f7715909c95581b9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0e111ce373594ac0f4bf89b06fd2008eb6554e566575b88e4a4f6beaaa29d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:12Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.725843 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wbk2x\" (UniqueName: \"kubernetes.io/projected/0781b010-f65f-4e49-9d78-48eda11666fb-kube-api-access-wbk2x\") pod \"node-ca-vxpvq\" (UID: \"0781b010-f65f-4e49-9d78-48eda11666fb\") " pod="openshift-image-registry/node-ca-vxpvq" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.735381 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fd4a14eb803afc1b1efecd7c9b8560546554be16de62e8db79752bce61a47dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50292888ad1f8cb9b64dcecfa47015e88b7a560123e50d4a3bf1550b87cfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:12Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.742146 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" event={"ID":"095cdcdf-1ea0-40da-871a-1223c6737377","Type":"ContainerStarted","Data":"38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d"} Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.742188 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" event={"ID":"095cdcdf-1ea0-40da-871a-1223c6737377","Type":"ContainerStarted","Data":"e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b"} Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.742199 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" event={"ID":"095cdcdf-1ea0-40da-871a-1223c6737377","Type":"ContainerStarted","Data":"9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e"} Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.742207 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" event={"ID":"095cdcdf-1ea0-40da-871a-1223c6737377","Type":"ContainerStarted","Data":"abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861"} Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.742216 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" event={"ID":"095cdcdf-1ea0-40da-871a-1223c6737377","Type":"ContainerStarted","Data":"cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e"} Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.745710 4783 generic.go:334] "Generic (PLEG): container finished" podID="94681624-a0a9-443a-9b4d-715182399740" containerID="da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22" exitCode=0 Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.745858 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-jqvp2" event={"ID":"94681624-a0a9-443a-9b4d-715182399740","Type":"ContainerDied","Data":"da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22"} Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.750265 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3288cc82-59a8-408e-8b0e-b5255882b4fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d07b963cb479fc49309f30652bcc2566fd49cd3c7fa53fc79aaf82a9a4b27eab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65d5af548f6048ec95699a1b7175ec8efea96441c335805d704b414eedac00d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2j8rt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:12Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.767039 4783 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2054147b-696c-471a-9602-db747325cae2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d954f0dc3572c180556ccde9a4c429359575e5418fb786fc5df6987f8c326075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e521d7072220c202109a67fe59ebebe0db8bd877d02d9805324922eb3bf45d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb33b77ecaf413bc6e4aeb0f4358b12cd0f3fa6e01602673a518f5651e4bac6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-02T10:53:07Z\\\",\\\"message\\\":\\\"file observer\\\\nW1002 10:53:07.776642 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1002 10:53:07.776950 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1002 10:53:07.778492 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-420939118/tls.crt::/tmp/serving-cert-420939118/tls.key\\\\\\\"\\\\nI1002 10:53:07.918100 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1002 10:53:07.922528 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1002 10:53:07.922551 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1002 10:53:07.922578 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1002 10:53:07.922585 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1002 10:53:07.930960 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1002 10:53:07.930984 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1002 10:53:07.930999 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931026 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931031 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1002 10:53:07.931036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1002 10:53:07.931039 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1002 10:53:07.931042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1002 10:53:07.933737 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://85188b6d7b5901c957d4ea576d5baab74589a2300f15d6210915fd78f6171348\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:12Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.780778 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"acf018d3-a71b-450d-88ef-8f991d5d2219\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf7913b5261e05e8bbfb22a81635ff274a8a28d550d004a5349a9ed84ba5c8b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcc4f647d50346cff07c267ebe645576d032b01e443eaabe4b9aa73b91966e4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdd2ca18244e601967c7e10b1f8f1f02fcf33bfee712d78f7715909c95581b9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0e111ce373594ac0f4bf89b06fd2008eb6554e566575b88e4a4f6beaaa29d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:12Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.794768 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fd4a14eb803afc1b1efecd7c9b8560546554be16de62e8db79752bce61a47dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50292888ad1f8cb9b64dcecfa47015e88b7a560123e50d4a3bf1550b87cfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:12Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.805913 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3288cc82-59a8-408e-8b0e-b5255882b4fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d07b963cb479fc49309f30652bcc2566fd49cd3c7fa53fc79aaf82a9a4b27eab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65d5af548f6048ec95699a1b7175ec8efea96441c335805d704b414eedac00d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha
256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2j8rt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:12Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.815814 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vxpvq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0781b010-f65f-4e49-9d78-48eda11666fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbk2x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:12Z\\\"}}\" for pod 
\"openshift-image-registry\"/\"node-ca-vxpvq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:12Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.834489 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-vxpvq" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.835096 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9d7cf9d-8522-4a62-aaf3-1579864e50f3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b2e414bb5345d3aae7e32d912acf6d7ede59c1f47e1474caf4736663c733d2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8d3ec78a66bfaf15222ed50fe81f042cbf0b9aa5ecc8fb175b0efc8bc1bccbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a03f92fd7d8f5e7193682a5f1db78e947bea4135976b5db6d87842634f0c4d2\\\",\\\"image\
\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edada94eb053469810a747a5d1bf8fe2c2da095e5c944dd2cd5ab7846faf5ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebdc8ed0cab8049a9f6220e6069291fd7a0e6c2b97e0442e3221549cbcc889f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sh
a256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:12Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:12 crc kubenswrapper[4783]: W1002 10:53:12.847187 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0781b010_f65f_4e49_9d78_48eda11666fb.slice/crio-1e956405cf3210742ff78c362a01a433d0be0ed725db2fd0251f166a42ab4554 WatchSource:0}: Error finding container 1e956405cf3210742ff78c362a01a433d0be0ed725db2fd0251f166a42ab4554: Status 404 returned error can't find the container with id 1e956405cf3210742ff78c362a01a433d0be0ed725db2fd0251f166a42ab4554 Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.848276 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:12Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.858728 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:12Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.871531 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dec06359fbdd507aff2ffef8bd1114892cd9e4732b0f21139fd60b5765bc074\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:12Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.912106 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wmn4g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6c8d5bc-163f-401f-bdc5-4625112dced9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ec2f190b5004026dfc36e595ca483d08c1dc07796317d28f9dfd252e391fe7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6gs6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wmn4g\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:12Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:12 crc kubenswrapper[4783]: I1002 10:53:12.989987 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ks7tf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cbc7f6d-232e-484d-9afc-7111e428762c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160ecc09cabb40cb5867a449f2bb1707da2367f5b10cc4f53276b372b2641bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fj8jd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ks7tf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:12Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:13 crc kubenswrapper[4783]: I1002 10:53:13.031848 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jqvp2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"94681624-a0a9-443a-9b4d-715182399740\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"image\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"moun
tPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jqvp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:13Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:13 crc kubenswrapper[4783]: I1002 10:53:13.055182 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a3fab4917d2aaaba6208c2c5c4dd19b845e8d52e5e350566d8a78cfd581604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:13Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:13 crc kubenswrapper[4783]: I1002 10:53:13.072444 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was 
deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:13Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:13 crc kubenswrapper[4783]: I1002 10:53:13.113248 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"095cdcdf-1ea0-40da-871a-1223c6737377\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qmd84\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:13Z 
is after 2025-08-24T17:21:41Z" Oct 02 10:53:13 crc kubenswrapper[4783]: I1002 10:53:13.751361 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-vxpvq" event={"ID":"0781b010-f65f-4e49-9d78-48eda11666fb","Type":"ContainerStarted","Data":"feae8da7cb436d7753a9fce729106db7025bd28da1a5d4fb2e67ac07120daac8"} Oct 02 10:53:13 crc kubenswrapper[4783]: I1002 10:53:13.751466 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-vxpvq" event={"ID":"0781b010-f65f-4e49-9d78-48eda11666fb","Type":"ContainerStarted","Data":"1e956405cf3210742ff78c362a01a433d0be0ed725db2fd0251f166a42ab4554"} Oct 02 10:53:13 crc kubenswrapper[4783]: I1002 10:53:13.754901 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" event={"ID":"095cdcdf-1ea0-40da-871a-1223c6737377","Type":"ContainerStarted","Data":"a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3"} Oct 02 10:53:13 crc kubenswrapper[4783]: I1002 10:53:13.757370 4783 generic.go:334] "Generic (PLEG): container finished" podID="94681624-a0a9-443a-9b4d-715182399740" containerID="5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63" exitCode=0 Oct 02 10:53:13 crc kubenswrapper[4783]: I1002 10:53:13.757434 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-jqvp2" event={"ID":"94681624-a0a9-443a-9b4d-715182399740","Type":"ContainerDied","Data":"5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63"} Oct 02 10:53:13 crc kubenswrapper[4783]: I1002 10:53:13.773300 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a3fab4917d2aaaba6208c2c5c4dd19b845e8d52e5e350566d8a78cfd581604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:13Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:13 crc kubenswrapper[4783]: I1002 10:53:13.798266 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jqvp2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94681624-a0a9-443a-9b4d-715182399740\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-
02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jqvp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:13Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:13 crc kubenswrapper[4783]: I1002 10:53:13.821783 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:13Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:13 crc kubenswrapper[4783]: I1002 10:53:13.841064 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"095cdcdf-1ea0-40da-871a-1223c6737377\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qmd84\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:13Z 
is after 2025-08-24T17:21:41Z" Oct 02 10:53:13 crc kubenswrapper[4783]: I1002 10:53:13.853307 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fd4a14eb803afc1b1efecd7c9b8560546554be16de62e8db79752bce61a47dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50292888ad1f8cb9b64dcecfa47015e88b7a560123e50d4a3bf1550b87cfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:13Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:13 crc kubenswrapper[4783]: I1002 10:53:13.864684 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3288cc82-59a8-408e-8b0e-b5255882b4fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d07b963cb479fc49309f30652bcc2566fd49cd3c7fa53fc79aaf82a9a4b27eab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65d5af548f6048ec95699a1b7175ec8efea96441c335805d704b414eedac00d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2j8rt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:13Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:13 crc kubenswrapper[4783]: I1002 10:53:13.875685 4783 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-vxpvq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0781b010-f65f-4e49-9d78-48eda11666fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://feae8da7cb436d7753a9fce729106db7025bd28da1a5d4fb2e67ac07120daac8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbk2x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:12Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vxpvq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:13Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:13 crc kubenswrapper[4783]: I1002 10:53:13.897508 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9d7cf9d-8522-4a62-aaf3-1579864e50f3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b2e414bb5345d3aae7e32d912acf6d7ede59c1f47e1474caf4736663c733d2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8d3ec78a66bfaf15222ed50fe81f042cbf0b9aa5ecc8fb175b0efc8bc1bccbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a03f92fd7d8f5e7193682a5f1db78e947bea4135976b5db6d87842634f0c4d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edada94eb053469810a747a5d1bf8fe2c2da09
5e5c944dd2cd5ab7846faf5ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebdc8ed0cab8049a9f6220e6069291fd7a0e6c2b97e0442e3221549cbcc889f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:13Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:13 crc kubenswrapper[4783]: I1002 10:53:13.910796 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2054147b-696c-471a-9602-db747325cae2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d954f0dc3572c180556ccde9a4c429359575e5418fb786fc5df6987f8c326075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e521d7072220c202109a67fe59ebebe0db8bd877d02d9805324922eb3bf45d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb33b77ecaf413bc6e4aeb0f4358b12cd0f3fa6e01602673a518f5651e4bac6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-02T10:53:07Z\\\",\\\"message\\\":\\\"file observer\\\\nW1002 10:53:07.776642 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1002 10:53:07.776950 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1002 10:53:07.778492 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-420939118/tls.crt::/tmp/serving-cert-420939118/tls.key\\\\\\\"\\\\nI1002 10:53:07.918100 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1002 10:53:07.922528 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1002 10:53:07.922551 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1002 10:53:07.922578 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1002 10:53:07.922585 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1002 10:53:07.930960 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1002 10:53:07.930984 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1002 10:53:07.930999 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931026 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931031 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1002 10:53:07.931036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1002 10:53:07.931039 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1002 10:53:07.931042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1002 10:53:07.933737 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://85188b6d7b5901c957d4ea576d5baab74589a2300f15d6210915fd78f6171348\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:13Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:13 crc kubenswrapper[4783]: I1002 10:53:13.923880 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"acf018d3-a71b-450d-88ef-8f991d5d2219\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf7913b5261e05e8bbfb22a81635ff274a8a28d550d004a5349a9ed84ba5c8b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcc4f647d50346cff07c267ebe645576d032b01e443eaabe4b9aa73b91966e4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdd2ca18244e601967c7e10b1f8f1f02fcf33bfee712d78f7715909c95581b9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0e111ce373594ac0f4bf89b06fd2008eb6554e566575b88e4a4f6beaaa29d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:13Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:13 crc kubenswrapper[4783]: I1002 10:53:13.938022 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dec06359fbdd507aff2ffef8bd1114892cd9e4732b0f21139fd60b5765bc074\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-10-02T10:53:13Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:13 crc kubenswrapper[4783]: I1002 10:53:13.957855 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wmn4g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6c8d5bc-163f-401f-bdc5-4625112dced9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ec2f190b5004026dfc36e595ca483d08c1dc07796317d28f9dfd252e391fe7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6gs6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}
],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wmn4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:13Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:13 crc kubenswrapper[4783]: I1002 10:53:13.969012 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ks7tf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cbc7f6d-232e-484d-9afc-7111e428762c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160ecc09cabb40cb5867a449f2bb1707da2367f5b10cc4f53276b372b2641bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fj8jd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ks7tf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:13Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:13 crc kubenswrapper[4783]: I1002 10:53:13.981946 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:13Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:13 crc kubenswrapper[4783]: I1002 10:53:13.993438 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:13Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.005053 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:14Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.016288 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dec06359fbdd507aff2ffef8bd1114892cd9e4732b0f21139fd60b5765bc074\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:14Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.030352 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wmn4g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6c8d5bc-163f-401f-bdc5-4625112dced9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ec2f190b5004026dfc36e595ca483d08c1dc07796317d28f9dfd252e391fe7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6gs6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wmn4g\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:14Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.041156 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ks7tf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cbc7f6d-232e-484d-9afc-7111e428762c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160ecc09cabb40cb5867a449f2bb1707da2367f5b10cc4f53276b372b2641bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fj8jd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ks7tf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:14Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.054566 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:14Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.066809 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a3fab4917d2aaaba6208c2c5c4dd19b845e8d52e5e350566d8a78cfd581604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:14Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.079851 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jqvp2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94681624-a0a9-443a-9b4d-715182399740\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jqvp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:14Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.095976 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"095cdcdf-1ea0-40da-871a-1223c6737377\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qmd84\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:14Z 
is after 2025-08-24T17:21:41Z" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.108918 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:14Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.122115 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"acf018d3-a71b-450d-88ef-8f991d5d2219\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf7913b5261e05e8bbfb22a81635ff274a8a28d550d004a5349a9ed84ba5c8b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcc4f647d50346cff07c267ebe645576d032b01e443eaabe4b9aa73b91966e4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdd2ca18244e601967c7e10b1f8f1f02fcf33bfee712d78f7715909c95581b9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0e111ce373594ac0f4bf89b06fd2008eb6554e566575b88e4a4f6beaaa29d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:14Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.149056 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fd4a14eb803afc1b1efecd7c9b8560546554be16de62e8db79752bce61a47dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50292888ad1f8cb9b64dcecfa47015e88b7a560123e50d4a3bf1550b87cfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:14Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.175660 4783 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.177117 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.177154 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.177164 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.177270 4783 kubelet_node_status.go:76] "Attempting to register node" node="crc" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.188811 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3288cc82-59a8-408e-8b0e-b5255882b4fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d07b963cb479fc49309f30652bcc2566fd49cd3c7fa53fc79aaf82a9a4b27eab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65d5af548f6048ec95699a1b7175ec8efea96441c335805d704b414eedac00d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2j8rt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:14Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.240519 4783 kubelet_node_status.go:115] "Node 
was previously registered" node="crc" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.240793 4783 kubelet_node_status.go:79] "Successfully registered node" node="crc" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.241728 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.241759 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.241772 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.241787 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.241798 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:14Z","lastTransitionTime":"2025-10-02T10:53:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:14 crc kubenswrapper[4783]: E1002 10:53:14.257507 4783 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc187a47-fc71-4069-a609-1fd638044aa7\\\",\\\"systemUUID\\\":\\\"b9763463-2b4a-4924-bf4e-8df5af678b9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:14Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.260847 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.260892 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.260904 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.260920 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.261156 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:14Z","lastTransitionTime":"2025-10-02T10:53:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.266285 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vxpvq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0781b010-f65f-4e49-9d78-48eda11666fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://feae8da7cb436d7753a9fce729106db7025bd28da1a5d4fb2e67ac07120daac8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbk2x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:12Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vxpvq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2025-10-02T10:53:14Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:14 crc kubenswrapper[4783]: E1002 10:53:14.271893 4783 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc187a47-fc71-4069-a609-1fd638044aa7\\\",\\\"systemUUID\\\":\\\"b9763463-2b4a-4924-bf4e-8df5af678b9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:14Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.274684 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.274722 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.274734 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.274749 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.274759 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:14Z","lastTransitionTime":"2025-10-02T10:53:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:14 crc kubenswrapper[4783]: E1002 10:53:14.286985 4783 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[...image list elided; byte-identical to the list in the previous "Error updating node status" attempt above...],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc187a47-fc71-4069-a609-1fd638044aa7\\\",\\\"systemUUID\\\":\\\"b9763463-2b4a-4924-bf4e-8df5af678b9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:14Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.289779 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.289817 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc"
event="NodeHasNoDiskPressure" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.289825 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.289842 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.289851 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:14Z","lastTransitionTime":"2025-10-02T10:53:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:14 crc kubenswrapper[4783]: E1002 10:53:14.304864 4783 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[...image list elided; byte-identical to the list in the previous "Error updating node status" attempt above...],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc187a47-fc71-4069-a609-1fd638044aa7\\\",\\\"systemUUID\\\":\\\"b9763463-2b4a-4924-bf4e-8df5af678b9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:14Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.307673 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.307706 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc"
event="NodeHasNoDiskPressure" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.307718 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.307733 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.307744 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:14Z","lastTransitionTime":"2025-10-02T10:53:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.321374 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9d7cf9d-8522-4a62-aaf3-1579864e50f3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b2e414bb5345d3aae7e32d912acf6d7ede59c1f47e1474caf4736663c733d2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8d3ec78a66bfaf15222ed50fe81f042cbf0b9aa5ecc8fb175b0efc8bc1bccbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resourc
es\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a03f92fd7d8f5e7193682a5f1db78e947bea4135976b5db6d87842634f0c4d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edada94eb053469810a747a5d1bf8fe2c2da095e5c944dd2cd5ab7846faf5ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebdc8ed0cab8049a9f6220e6069291fd7a0e6c2b97e0442e3221549cbcc889f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Complet
ed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:14Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:14 crc kubenswrapper[4783]: E1002 10:53:14.324269 4783 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\
"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":45063
7738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc187a47-fc71-4069-a609-1fd638044aa7\\\",\\\"systemUUID\\\":\\\"b9763463-2b4a-4924-bf4e-8df5af678b9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:14Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:14 crc kubenswrapper[4783]: E1002 10:53:14.324438 4783 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.326178 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.326207 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.326217 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.326231 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.326245 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:14Z","lastTransitionTime":"2025-10-02T10:53:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.347476 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2054147b-696c-471a-9602-db747325cae2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d954f0dc3572c180556ccde9a4c429359575e5418fb786fc5df6987f8c326075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e521d7072220c202109a67fe59ebebe0db8bd877d02d9805324922eb3bf45d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb33b77ecaf413bc6e4aeb0f4358b12cd0f3fa6e01602673a518f5651e4bac6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-02T10:53:07Z\\\",\\\"message\\\":\\\"file observer\\\\nW1002 10:53:07.776642 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1002 10:53:07.776950 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1002 10:53:07.778492 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-420939118/tls.crt::/tmp/serving-cert-420939118/tls.key\\\\\\\"\\\\nI1002 10:53:07.918100 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1002 10:53:07.922528 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1002 10:53:07.922551 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1002 10:53:07.922578 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1002 10:53:07.922585 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1002 10:53:07.930960 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1002 10:53:07.930984 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1002 10:53:07.930999 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931026 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931031 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1002 10:53:07.931036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1002 10:53:07.931039 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1002 10:53:07.931042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1002 10:53:07.933737 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://85188b6d7b5901c957d4ea576d5baab74589a2300f15d6210915fd78f6171348\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:14Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.428271 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.428312 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.428324 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.428342 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.428353 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:14Z","lastTransitionTime":"2025-10-02T10:53:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.530962 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.530994 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.531002 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.531016 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.531026 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:14Z","lastTransitionTime":"2025-10-02T10:53:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.544617 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.544641 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.544735 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:53:14 crc kubenswrapper[4783]: E1002 10:53:14.544734 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 02 10:53:14 crc kubenswrapper[4783]: E1002 10:53:14.544862 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 02 10:53:14 crc kubenswrapper[4783]: E1002 10:53:14.545070 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.633323 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.633363 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.633371 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.633400 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.633434 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:14Z","lastTransitionTime":"2025-10-02T10:53:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.736001 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.736044 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.736055 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.736069 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.736078 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:14Z","lastTransitionTime":"2025-10-02T10:53:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.762922 4783 generic.go:334] "Generic (PLEG): container finished" podID="94681624-a0a9-443a-9b4d-715182399740" containerID="2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004" exitCode=0 Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.762970 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-jqvp2" event={"ID":"94681624-a0a9-443a-9b4d-715182399740","Type":"ContainerDied","Data":"2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004"} Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.777733 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a3fab4917d2aaaba6208c2c5c4dd19b845e8d52e5e350566d8a78cfd581604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:14Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.793870 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jqvp2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"94681624-a0a9-443a-9b4d-715182399740\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/
ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\"
:\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jqvp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:14Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.806453 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:14Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.838554 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"095cdcdf-1ea0-40da-871a-1223c6737377\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qmd84\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:14Z 
is after 2025-08-24T17:21:41Z" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.844733 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.844841 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.844937 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.845047 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.845166 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:14Z","lastTransitionTime":"2025-10-02T10:53:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.850804 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3288cc82-59a8-408e-8b0e-b5255882b4fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d07b963cb479fc49309f30652bcc2566fd49cd3c7fa53fc79aaf82a9a4b27eab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65d5af548f6048ec95699a1b7175ec8efea96441c335805d704b414eedac00d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699
a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2j8rt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:14Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.862607 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vxpvq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0781b010-f65f-4e49-9d78-48eda11666fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://feae8da7cb436d7753a9fce729106db7025bd28da1a5d4fb2e67ac07120daac8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbk2x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"
podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:12Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vxpvq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:14Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.882378 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9d7cf9d-8522-4a62-aaf3-1579864e50f3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b2e414bb5345d3aae7e32d912acf6d7ede59c1f47e1474caf4736663c733d2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8d3ec78a66bfaf15222ed50fe81f042cbf0b9aa5ecc8fb175b0efc8bc1bccbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a03f92fd7d8f5e7193682a5f1db78e947bea4135976b5db6d87842634f0c4d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/o
cp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edada94eb053469810a747a5d1bf8fe2c2da095e5c944dd2cd5ab7846faf5ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebdc8ed0cab8049a9f6220e6069291fd7a0e6c2b97e0442e3221549cbcc889f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b
90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:14Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.894927 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2054147b-696c-471a-9602-db747325cae2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d954f0dc3572c180556ccde9a4c429359575e5418fb786fc5df6987f8c326075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e521d7072220c202109a67fe59ebebe0db8bd877d02d9805324922eb3bf45d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb33b77ecaf413bc6e4aeb0f4358b12cd0f3fa6e01602673a518f5651e4bac6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-02T10:53:07Z\\\",\\\"message\\\":\\\"file observer\\\\nW1002 10:53:07.776642 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1002 10:53:07.776950 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1002 10:53:07.778492 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-420939118/tls.crt::/tmp/serving-cert-420939118/tls.key\\\\\\\"\\\\nI1002 10:53:07.918100 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1002 10:53:07.922528 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1002 10:53:07.922551 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1002 10:53:07.922578 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1002 10:53:07.922585 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1002 10:53:07.930960 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1002 10:53:07.930984 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1002 10:53:07.930999 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931026 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931031 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1002 10:53:07.931036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1002 10:53:07.931039 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1002 10:53:07.931042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1002 10:53:07.933737 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://85188b6d7b5901c957d4ea576d5baab74589a2300f15d6210915fd78f6171348\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:14Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.909160 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"acf018d3-a71b-450d-88ef-8f991d5d2219\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf7913b5261e05e8bbfb22a81635ff274a8a28d550d004a5349a9ed84ba5c8b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcc4f647d50346cff07c267ebe645576d032b01e443eaabe4b9aa73b91966e4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdd2ca18244e601967c7e10b1f8f1f02fcf33bfee712d78f7715909c95581b9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0e111ce373594ac0f4bf89b06fd2008eb6554e566575b88e4a4f6beaaa29d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:14Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.926792 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fd4a14eb803afc1b1efecd7c9b8560546554be16de62e8db79752bce61a47dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50292888ad1f8cb9b64dcecfa47015e88b7a560123e50d4a3bf1550b87cfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:14Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.938572 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wmn4g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6c8d5bc-163f-401f-bdc5-4625112dced9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ec2f190b5004026dfc36e595ca483d08c1dc07796317d28f9dfd252e391fe7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/c
ni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6gs6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wmn4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:14Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.947431 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.947466 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.947474 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.947490 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.947499 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:14Z","lastTransitionTime":"2025-10-02T10:53:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.947830 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ks7tf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cbc7f6d-232e-484d-9afc-7111e428762c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160ecc09cabb40cb5867a449f2bb1707da2367f5b10cc4f53276b372b2641bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fj8jd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ks7tf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:14Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.963209 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:14Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.974300 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:14Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:14 crc kubenswrapper[4783]: I1002 10:53:14.984021 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dec06359fbdd507aff2ffef8bd1114892cd9e4732b0f21139fd60b5765bc074\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:14Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.050473 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:15 
crc kubenswrapper[4783]: I1002 10:53:15.050543 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.050559 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.050577 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.050616 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:15Z","lastTransitionTime":"2025-10-02T10:53:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.154097 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.154132 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.154140 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.154155 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.154164 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:15Z","lastTransitionTime":"2025-10-02T10:53:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.257651 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.257687 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.257694 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.257709 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.257720 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:15Z","lastTransitionTime":"2025-10-02T10:53:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.361475 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.361551 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.361568 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.361588 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.361632 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:15Z","lastTransitionTime":"2025-10-02T10:53:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.464918 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.465024 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.465043 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.465118 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.465217 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:15Z","lastTransitionTime":"2025-10-02T10:53:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.569635 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.569748 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.569775 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.569844 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.569868 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:15Z","lastTransitionTime":"2025-10-02T10:53:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.586223 4783 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.587205 4783 scope.go:117] "RemoveContainer" containerID="ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a" Oct 02 10:53:15 crc kubenswrapper[4783]: E1002 10:53:15.587471 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-apiserver-check-endpoints\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-apiserver-check-endpoints pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\"" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.673772 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.673809 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.673820 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.673839 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.673852 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:15Z","lastTransitionTime":"2025-10-02T10:53:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.771450 4783 generic.go:334] "Generic (PLEG): container finished" podID="94681624-a0a9-443a-9b4d-715182399740" containerID="805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4" exitCode=0 Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.771580 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-jqvp2" event={"ID":"94681624-a0a9-443a-9b4d-715182399740","Type":"ContainerDied","Data":"805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4"} Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.775980 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.776017 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.776038 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.776064 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.776085 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:15Z","lastTransitionTime":"2025-10-02T10:53:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.778601 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" event={"ID":"095cdcdf-1ea0-40da-871a-1223c6737377","Type":"ContainerStarted","Data":"ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f"} Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.788875 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ks7tf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cbc7f6d-232e-484d-9afc-7111e428762c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160ecc09cabb40cb5867a449f2bb1707da2367f5b10cc4f53276b372b2641bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fj8jd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ks7tf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:15Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.810463 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:15Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.831488 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:15Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.844143 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dec06359fbdd507aff2ffef8bd1114892cd9e4732b0f21139fd60b5765bc074\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:15Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.865064 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wmn4g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6c8d5bc-163f-401f-bdc5-4625112dced9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ec2f190b5004026dfc36e595ca483d08c1dc07796317d28f9dfd252e391fe7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6gs6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wmn4g\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:15Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.878431 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.878461 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.878473 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.878489 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.878506 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:15Z","lastTransitionTime":"2025-10-02T10:53:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.881882 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a3fab4917d2aaaba6208c2c5c4dd19b845e8d52e5e350566d8a78cfd581604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:15Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.902848 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jqvp2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94681624-a0a9-443a-9b4d-715182399740\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/
host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"sta
rted\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jqvp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:15Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.917330 4783 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:15Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.946286 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"095cdcdf-1ea0-40da-871a-1223c6737377\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qmd84\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:15Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.963063 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vxpvq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0781b010-f65f-4e49-9d78-48eda11666fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://feae8da7cb436d7753a9fce729106db7025bd28da1a5d4fb2e67ac07120daac8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbk2x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:12Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vxpvq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:15Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.981011 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.981053 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.981066 4783 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.981084 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.981098 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:15Z","lastTransitionTime":"2025-10-02T10:53:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.983552 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9d7cf9d-8522-4a62-aaf3-1579864e50f3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b2e414bb5345d3aae7e32d912acf6d7ede59c1f47e1474caf4736663c733d2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8d3ec78a66bfaf15222ed50fe81f042cbf0b9aa5ecc8fb175b0efc8bc1bccbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\
\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a03f92fd7d8f5e7193682a5f1db78e947bea4135976b5db6d87842634f0c4d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edada94eb053469810a747a5d1bf8fe2c2da095e5c944dd2cd5ab7846faf5ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebdc8ed0cab8049a9f6220e6069291fd7a0e6c2b97e0442e3221549cbcc889f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\
\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:15Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:15 crc kubenswrapper[4783]: I1002 10:53:15.995323 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2054147b-696c-471a-9602-db747325cae2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d954f0dc3572c180556ccde9a4c429359575e5418fb786fc5df6987f8c326075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e521d7072220c202109a67fe59ebebe0db8bd877d02d9805324922eb3bf45d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb33b77ecaf413bc6e4aeb0f4358b12cd0f3fa6e01602673a518f5651e4bac6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-02T10:53:07Z\\\",\\\"message\\\":\\\"file observer\\\\nW1002 10:53:07.776642 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1002 10:53:07.776950 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1002 10:53:07.778492 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-420939118/tls.crt::/tmp/serving-cert-420939118/tls.key\\\\\\\"\\\\nI1002 10:53:07.918100 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1002 10:53:07.922528 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1002 10:53:07.922551 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1002 10:53:07.922578 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1002 10:53:07.922585 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1002 10:53:07.930960 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1002 10:53:07.930984 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1002 10:53:07.930999 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931026 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931031 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1002 10:53:07.931036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1002 10:53:07.931039 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1002 10:53:07.931042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1002 10:53:07.933737 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://85188b6d7b5901c957d4ea576d5baab74589a2300f15d6210915fd78f6171348\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:15Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.009678 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"acf018d3-a71b-450d-88ef-8f991d5d2219\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf7913b5261e05e8bbfb22a81635ff274a8a28d550d004a5349a9ed84ba5c8b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcc4f647d50346cff07c267ebe645576d032b01e443eaabe4b9aa73b91966e4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdd2ca18244e601967c7e10b1f8f1f02fcf33bfee712d78f7715909c95581b9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0e111ce373594ac0f4bf89b06fd2008eb6554e566575b88e4a4f6beaaa29d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:16Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.024829 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fd4a14eb803afc1b1efecd7c9b8560546554be16de62e8db79752bce61a47dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50292888ad1f8cb9b64dcecfa47015e88b7a560123e50d4a3bf1550b87cfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:16Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.038279 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3288cc82-59a8-408e-8b0e-b5255882b4fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d07b963cb479fc49309f30652bcc2566fd49cd3c7fa53fc79aaf82a9a4b27eab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65d5af548f6048ec95699a1b7175ec8efea96441c335805d704b414eedac00d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha
256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2j8rt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:16Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.083156 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.083211 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.083223 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.083241 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.083255 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:16Z","lastTransitionTime":"2025-10-02T10:53:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.136143 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.136310 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:53:16 crc kubenswrapper[4783]: E1002 10:53:16.136466 4783 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Oct 02 10:53:16 crc kubenswrapper[4783]: E1002 10:53:16.136483 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:53:24.136450635 +0000 UTC m=+37.452644906 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.136583 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:53:16 crc kubenswrapper[4783]: E1002 10:53:16.136669 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-02 10:53:24.13664381 +0000 UTC m=+37.452838111 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Oct 02 10:53:16 crc kubenswrapper[4783]: E1002 10:53:16.136732 4783 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 02 10:53:16 crc kubenswrapper[4783]: E1002 10:53:16.136799 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-02 10:53:24.136783483 +0000 UTC m=+37.452977754 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.186256 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.186325 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.186345 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.186372 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.186394 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:16Z","lastTransitionTime":"2025-10-02T10:53:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.237848 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.237904 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 02 10:53:16 crc kubenswrapper[4783]: E1002 10:53:16.238029 4783 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 02 10:53:16 crc kubenswrapper[4783]: E1002 10:53:16.238048 4783 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 02 10:53:16 crc kubenswrapper[4783]: E1002 10:53:16.238061 4783 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 02 10:53:16 crc kubenswrapper[4783]: E1002 10:53:16.238070 4783 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 02 10:53:16 crc kubenswrapper[4783]: E1002 10:53:16.238114 4783 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 02 10:53:16 crc kubenswrapper[4783]: E1002 10:53:16.238139 4783 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 02 10:53:16 crc kubenswrapper[4783]: E1002 10:53:16.238118 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-10-02 10:53:24.238103469 +0000 UTC m=+37.554297740 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 02 10:53:16 crc kubenswrapper[4783]: E1002 10:53:16.238218 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-10-02 10:53:24.238207932 +0000 UTC m=+37.554402203 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.289267 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.289317 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.289362 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.289386 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.289409 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:16Z","lastTransitionTime":"2025-10-02T10:53:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.392486 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.392546 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.392562 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.392584 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.392601 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:16Z","lastTransitionTime":"2025-10-02T10:53:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.494837 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.494891 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.494902 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.494919 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.494928 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:16Z","lastTransitionTime":"2025-10-02T10:53:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.544228 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.544328 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.544256 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 02 10:53:16 crc kubenswrapper[4783]: E1002 10:53:16.544516 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 02 10:53:16 crc kubenswrapper[4783]: E1002 10:53:16.544431 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 02 10:53:16 crc kubenswrapper[4783]: E1002 10:53:16.544593 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.597980 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.598037 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.598055 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.598077 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.598094 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:16Z","lastTransitionTime":"2025-10-02T10:53:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.701088 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.701155 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.701178 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.701205 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.701227 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:16Z","lastTransitionTime":"2025-10-02T10:53:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.786233 4783 generic.go:334] "Generic (PLEG): container finished" podID="94681624-a0a9-443a-9b4d-715182399740" containerID="ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e" exitCode=0 Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.786271 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-jqvp2" event={"ID":"94681624-a0a9-443a-9b4d-715182399740","Type":"ContainerDied","Data":"ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e"} Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.805302 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.805337 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.805348 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.805363 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.805375 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:16Z","lastTransitionTime":"2025-10-02T10:53:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.816831 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a3fab4917d2aaaba6208c2c5c4dd19b845e8d52e5e350566d8a78cfd581604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:16Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.839182 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jqvp2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94681624-a0a9-443a-9b4d-715182399740\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jqvp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:16Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.864189 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when 
the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:16Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.890867 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"095cdcdf-1ea0-40da-871a-1223c6737377\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qmd84\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:16Z 
is after 2025-08-24T17:21:41Z" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.904581 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vxpvq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0781b010-f65f-4e49-9d78-48eda11666fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://feae8da7cb436d7753a9fce729106db7025bd28da1a5d4fb2e67ac07120daac8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbk2x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:12Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vxpvq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:16Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.909328 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.909361 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.909371 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.909384 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.909392 4783 setters.go:603] "Node became not ready" 
node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:16Z","lastTransitionTime":"2025-10-02T10:53:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.935724 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9d7cf9d-8522-4a62-aaf3-1579864e50f3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b2e414bb5345d3aae7e32d912acf6d7ede59c1f47e1474caf4736663c733d2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8d3ec78a66bfaf15222ed50fe81f042cbf0b9aa5ecc8fb175b0efc8bc1bccbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a03f92fd7d8f5e7193682a5f1db78e947bea4135976b5db6d87842634f0c4d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"q
uay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edada94eb053469810a747a5d1bf8fe2c2da095e5c944dd2cd5ab7846faf5ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebdc8ed0cab8049a9f6220e6069291fd7a0e6c2b97e0442e3221549cbcc889f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7
c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:16Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.948826 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2054147b-696c-471a-9602-db747325cae2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d954f0dc3572c180556ccde9a4c429359575e5418fb786fc5df6987f8c326075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e521d7072220c202109a67fe59ebebe0db8bd877d02d9805324922eb3bf45d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb33b77ecaf413bc6e4aeb0f4358b12cd0f3fa6e01602673a518f5651e4bac6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-02T10:53:07Z\\\",\\\"message\\\":\\\"file observer\\\\nW1002 10:53:07.776642 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1002 10:53:07.776950 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1002 10:53:07.778492 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-420939118/tls.crt::/tmp/serving-cert-420939118/tls.key\\\\\\\"\\\\nI1002 10:53:07.918100 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1002 10:53:07.922528 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1002 10:53:07.922551 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1002 10:53:07.922578 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1002 10:53:07.922585 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1002 10:53:07.930960 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1002 10:53:07.930984 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1002 10:53:07.930999 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931026 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931031 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1002 10:53:07.931036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1002 10:53:07.931039 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1002 10:53:07.931042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1002 10:53:07.933737 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://85188b6d7b5901c957d4ea576d5baab74589a2300f15d6210915fd78f6171348\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:16Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.963707 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"acf018d3-a71b-450d-88ef-8f991d5d2219\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf7913b5261e05e8bbfb22a81635ff274a8a28d550d004a5349a9ed84ba5c8b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcc4f647d50346cff07c267ebe645576d032b01e443eaabe4b9aa73b91966e4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdd2ca18244e601967c7e10b1f8f1f02fcf33bfee712d78f7715909c95581b9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0e111ce373594ac0f4bf89b06fd2008eb6554e566575b88e4a4f6beaaa29d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:16Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.979207 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fd4a14eb803afc1b1efecd7c9b8560546554be16de62e8db79752bce61a47dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50292888ad1f8cb9b64dcecfa47015e88b7a560123e50d4a3bf1550b87cfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:16Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:16 crc kubenswrapper[4783]: I1002 10:53:16.990599 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3288cc82-59a8-408e-8b0e-b5255882b4fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d07b963cb479fc49309f30652bcc2566fd49cd3c7fa53fc79aaf82a9a4b27eab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65d5af548f6048ec95699a1b7175ec8efea96441c335805d704b414eedac00d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha
256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2j8rt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:16Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.002660 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ks7tf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cbc7f6d-232e-484d-9afc-7111e428762c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160ecc09cabb40cb5867a449f2bb1707da2367f5b10cc4f53276b372b2641bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fj8jd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ks7tf\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.014444 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.014468 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.014476 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.014490 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.014500 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:17Z","lastTransitionTime":"2025-10-02T10:53:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.018578 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.027491 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.038209 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dec06359fbdd507aff2ffef8bd1114892cd9e4732b0f21139fd60b5765bc074\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.052868 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wmn4g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6c8d5bc-163f-401f-bdc5-4625112dced9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ec2f190b5004026dfc36e595ca483d08c1dc07796317d28f9dfd252e391fe7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6gs6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wmn4g\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.117025 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.117077 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.117097 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.117114 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.117127 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:17Z","lastTransitionTime":"2025-10-02T10:53:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.219908 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.219939 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.219948 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.219962 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.219971 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:17Z","lastTransitionTime":"2025-10-02T10:53:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.321883 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.321955 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.321979 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.322011 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.322035 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:17Z","lastTransitionTime":"2025-10-02T10:53:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.424531 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.424577 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.424591 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.424609 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.424621 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:17Z","lastTransitionTime":"2025-10-02T10:53:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.528165 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.528229 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.528247 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.528270 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.528289 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:17Z","lastTransitionTime":"2025-10-02T10:53:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.561167 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.593392 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"095cdcdf-1ea0-40da-871a-1223c6737377\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qmd84\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.613050 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fd4a14eb803afc1b1efecd7c9b8560546554be16de62e8db79752bce61a47dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50292888ad1f8cb9b64dcecfa47015e88b7a560123e50d4a3bf1550b87cfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.627392 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3288cc82-59a8-408e-8b0e-b5255882b4fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d07b963cb479fc49309f30652bcc2566fd49cd3c7fa53fc79aaf82a9a4b27eab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65d5af548f6048ec95699a1b7175ec8efea96441c335805d704b414eedac00d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2j8rt\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.630675 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.630714 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.630727 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.630745 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.630760 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:17Z","lastTransitionTime":"2025-10-02T10:53:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.640792 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vxpvq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0781b010-f65f-4e49-9d78-48eda11666fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://feae8da7cb436d7753a9fce729106db7025bd28da1a5d4fb2e67ac07120daac8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbk2x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"h
ostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:12Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vxpvq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.658076 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9d7cf9d-8522-4a62-aaf3-1579864e50f3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b2e414bb5345d3aae7e32d912acf6d7ede59c1f47e1474caf4736663c733d2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8d3ec78a66bfaf15222ed50fe81f042cbf0b9aa5ecc8fb175b0efc8bc1bccbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o
://6a03f92fd7d8f5e7193682a5f1db78e947bea4135976b5db6d87842634f0c4d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edada94eb053469810a747a5d1bf8fe2c2da095e5c944dd2cd5ab7846faf5ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebdc8ed0cab8049a9f6220e6069291fd7a0e6c2b97e0442e3221549cbcc889f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fc
b326f1dae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.677151 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2054147b-696c-471a-9602-db747325cae2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d954f0dc3572c180556ccde9a4c429359575e5418fb786fc5df6987f8c326075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e521d7072220c202109a67fe59ebebe0db8bd877d02d9805324922eb3bf45d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb33b77ecaf413bc6e4aeb0f4358b12cd0f3fa6e01602673a518f5651e4bac6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-02T10:53:07Z\\\",\\\"message\\\":\\\"file observer\\\\nW1002 10:53:07.776642 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1002 10:53:07.776950 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1002 10:53:07.778492 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-420939118/tls.crt::/tmp/serving-cert-420939118/tls.key\\\\\\\"\\\\nI1002 10:53:07.918100 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1002 10:53:07.922528 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1002 10:53:07.922551 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1002 10:53:07.922578 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1002 10:53:07.922585 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1002 10:53:07.930960 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1002 10:53:07.930984 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1002 10:53:07.930999 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931026 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931031 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1002 10:53:07.931036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1002 10:53:07.931039 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1002 10:53:07.931042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1002 10:53:07.933737 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://85188b6d7b5901c957d4ea576d5baab74589a2300f15d6210915fd78f6171348\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.693486 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"acf018d3-a71b-450d-88ef-8f991d5d2219\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf7913b5261e05e8bbfb22a81635ff274a8a28d550d004a5349a9ed84ba5c8b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcc4f647d50346cff07c267ebe645576d032b01e443eaabe4b9aa73b91966e4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdd2ca18244e601967c7e10b1f8f1f02fcf33bfee712d78f7715909c95581b9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0e111ce373594ac0f4bf89b06fd2008eb6554e566575b88e4a4f6beaaa29d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.710995 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dec06359fbdd507aff2ffef8bd1114892cd9e4732b0f21139fd60b5765bc074\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-10-02T10:53:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.722529 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wmn4g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6c8d5bc-163f-401f-bdc5-4625112dced9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ec2f190b5004026dfc36e595ca483d08c1dc07796317d28f9dfd252e391fe7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6gs6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}
],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wmn4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.733291 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ks7tf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cbc7f6d-232e-484d-9afc-7111e428762c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160ecc09cabb40cb5867a449f2bb1707da2367f5b10cc4f53276b372b2641bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fj8jd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ks7tf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.734296 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.734337 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:17 
crc kubenswrapper[4783]: I1002 10:53:17.734348 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.734364 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.734376 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:17Z","lastTransitionTime":"2025-10-02T10:53:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.747354 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.759309 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.771299 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a3fab4917d2aaaba6208c2c5c4dd19b845e8d52e5e350566d8a78cfd581604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.783178 4783 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/multus-additional-cni-plugins-jqvp2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94681624-a0a9-443a-9b4d-715182399740\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2da
ed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\
",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jqvp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.793254 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-jqvp2" 
event={"ID":"94681624-a0a9-443a-9b4d-715182399740","Type":"ContainerStarted","Data":"7b4ea5dd6b4b12458d54c54f155ff89b39e168c234f9e96f04375a0ed60ef09e"} Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.797796 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" event={"ID":"095cdcdf-1ea0-40da-871a-1223c6737377","Type":"ContainerStarted","Data":"1c09e85cf525934f3a29fa74aa1d5a8249481ca4df4410b2c46aaec1542fa9c2"} Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.798346 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.798458 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.806273 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dec06359fbdd507aff2ffef8bd1114892cd9e4732b0f21139fd60b5765bc074\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.819067 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wmn4g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6c8d5bc-163f-401f-bdc5-4625112dced9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ec2f190b5004026dfc36e595ca483d08c1dc07796317d28f9dfd252e391fe7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6gs6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wmn4g\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.829684 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ks7tf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cbc7f6d-232e-484d-9afc-7111e428762c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160ecc09cabb40cb5867a449f2bb1707da2367f5b10cc4f53276b372b2641bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fj8jd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ks7tf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.836752 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.836997 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.837086 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.837171 4783 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeNotReady" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.837257 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:17Z","lastTransitionTime":"2025-10-02T10:53:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.843804 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.845161 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.850856 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.857106 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.866718 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a3fab4917d2aaaba6208c2c5c4dd19b845e8d52e5e350566d8a78cfd581604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.882516 4783 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/multus-additional-cni-plugins-jqvp2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94681624-a0a9-443a-9b4d-715182399740\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b4ea5dd6b4b12458d54c54f155ff89b39e168c234f9e96f04375a0ed60ef09e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\
\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\
\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jqvp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.893203 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.910325 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"095cdcdf-1ea0-40da-871a-1223c6737377\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node 
kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni
/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qmd84\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not 
yet valid: current time 2025-10-02T10:53:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.921572 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fd4a14eb803afc1b1efecd7c9b8560546554be16de62e8db79752bce61a47dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50292888ad1f8cb9b64dcecfa47015e88b7a560123e50d4a3bf1550b87cfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.931642 4783 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3288cc82-59a8-408e-8b0e-b5255882b4fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d07b963cb479fc49309f30652bcc2566fd49cd3c7fa53fc79aaf82a9a4b27eab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65d5af548f6048ec95699a1b7175ec8efea96441c335805d704b414eedac00d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2j8rt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:17Z is after 2025-08-24T17:21:41Z" Oct 02 
10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.939149 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.939338 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.939435 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.939510 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.939576 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:17Z","lastTransitionTime":"2025-10-02T10:53:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.941081 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vxpvq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0781b010-f65f-4e49-9d78-48eda11666fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://feae8da7cb436d7753a9fce729106db7025bd28da1a5d4fb2e67ac07120daac8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbk2x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:12Z\\\"}}\" for pod 
\"openshift-image-registry\"/\"node-ca-vxpvq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.959780 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9d7cf9d-8522-4a62-aaf3-1579864e50f3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b2e414bb5345d3aae7e32d912acf6d7ede59c1f47e1474caf4736663c733d2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8d3ec78a66bfaf15222ed50fe81f042cbf0b9aa5ecc8fb175b0efc8bc1bccbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a03f92fd7d8f5e7193682a5f1db78e947bea4135976b5db6d87842634f0c4d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp
-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edada94eb053469810a747a5d1bf8fe2c2da095e5c944dd2cd5ab7846faf5ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebdc8ed0cab8049a9f6220e6069291fd7a0e6c2b97e0442e3221549cbcc889f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90
092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.971368 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2054147b-696c-471a-9602-db747325cae2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d954f0dc3572c180556ccde9a4c429359575e5418fb786fc5df6987f8c326075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e521d7072220c202109a67fe59ebebe0db8bd877d02d9805324922eb3bf45d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb33b77ecaf413bc6e4aeb0f4358b12cd0f3fa6e01602673a518f5651e4bac6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-02T10:53:07Z\\\",\\\"message\\\":\\\"file observer\\\\nW1002 10:53:07.776642 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1002 10:53:07.776950 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1002 10:53:07.778492 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-420939118/tls.crt::/tmp/serving-cert-420939118/tls.key\\\\\\\"\\\\nI1002 10:53:07.918100 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1002 10:53:07.922528 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1002 10:53:07.922551 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1002 10:53:07.922578 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1002 10:53:07.922585 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1002 10:53:07.930960 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1002 10:53:07.930984 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1002 10:53:07.930999 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931026 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931031 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1002 10:53:07.931036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1002 10:53:07.931039 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1002 10:53:07.931042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1002 10:53:07.933737 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://85188b6d7b5901c957d4ea576d5baab74589a2300f15d6210915fd78f6171348\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:17 crc kubenswrapper[4783]: I1002 10:53:17.981705 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"acf018d3-a71b-450d-88ef-8f991d5d2219\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf7913b5261e05e8bbfb22a81635ff274a8a28d550d004a5349a9ed84ba5c8b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcc4f647d50346cff07c267ebe645576d032b01e443eaabe4b9aa73b91966e4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdd2ca18244e601967c7e10b1f8f1f02fcf33bfee712d78f7715909c95581b9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0e111ce373594ac0f4bf89b06fd2008eb6554e566575b88e4a4f6beaaa29d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.002548 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jqvp2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94681624-a0a9-443a-9b4d-715182399740\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b4ea5dd6b4b12458d54c54f155ff89b39e168c234f9e96f04375a0ed60ef09e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62
d4f906b00cf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-b
inary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"termin
ated\\\":{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jqvp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:18Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.014671 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a3fab4917d2aaaba6208c2c5c4dd19b845e8d52e5e350566d8a78cfd581604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:18Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:18 crc kubenswrapper[4783]: 
I1002 10:53:18.025738 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:18Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.041518 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"095cdcdf-1ea0-40da-871a-1223c6737377\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c09e85cf525934f3a29fa74aa1d5a8249481ca4df4410b2c46aaec1542fa9c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPat
h\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qmd84\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:18Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.042912 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.042949 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.042959 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.042972 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.042981 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:18Z","lastTransitionTime":"2025-10-02T10:53:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.055330 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2054147b-696c-471a-9602-db747325cae2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d954f0dc3572c180556ccde9a4c429359575e5418fb786fc5df6987f8c326075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e521d7072220c202109a67fe59ebebe0db8bd877d02d9805324922eb3bf45d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb33b77ecaf413bc6e4aeb0f4358b12cd0f3fa6e01602673a518f5651e4bac6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-02T10:53:07Z\\\",\\\"message\\\":\\\"file observer\\\\nW1002 10:53:07.776642 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1002 10:53:07.776950 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1002 10:53:07.778492 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-420939118/tls.crt::/tmp/serving-cert-420939118/tls.key\\\\\\\"\\\\nI1002 10:53:07.918100 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1002 10:53:07.922528 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1002 10:53:07.922551 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1002 10:53:07.922578 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1002 10:53:07.922585 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1002 10:53:07.930960 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1002 10:53:07.930984 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1002 10:53:07.930999 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931026 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931031 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1002 10:53:07.931036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1002 10:53:07.931039 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1002 10:53:07.931042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1002 10:53:07.933737 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://85188b6d7b5901c957d4ea576d5baab74589a2300f15d6210915fd78f6171348\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:18Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.069398 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"acf018d3-a71b-450d-88ef-8f991d5d2219\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf7913b5261e05e8bbfb22a81635ff274a8a28d550d004a5349a9ed84ba5c8b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcc4f647d50346cff07c267ebe645576d032b01e443eaabe4b9aa73b91966e4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdd2ca18244e601967c7e10b1f8f1f02fcf33bfee712d78f7715909c95581b9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0e111ce373594ac0f4bf89b06fd2008eb6554e566575b88e4a4f6beaaa29d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:18Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.079812 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fd4a14eb803afc1b1efecd7c9b8560546554be16de62e8db79752bce61a47dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50292888ad1f8cb9b64dcecfa47015e88b7a560123e50d4a3bf1550b87cfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:18Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.089539 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3288cc82-59a8-408e-8b0e-b5255882b4fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d07b963cb479fc49309f30652bcc2566fd49cd3c7fa53fc79aaf82a9a4b27eab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65d5af548f6048ec95699a1b7175ec8efea96441c335805d704b414eedac00d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha
256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2j8rt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:18Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.098902 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vxpvq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0781b010-f65f-4e49-9d78-48eda11666fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://feae8da7cb436d7753a9fce729106db7025bd28da1a5d4fb2e67ac07120daac8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbk2x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10
:53:12Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vxpvq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:18Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.120310 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9d7cf9d-8522-4a62-aaf3-1579864e50f3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b2e414bb5345d3aae7e32d912acf6d7ede59c1f47e1474caf4736663c733d2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8d3ec78a66bfaf15222ed50fe81f042cbf0b9aa5ecc8fb175b0efc8bc1bccbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a03f92fd7d8f5e7193682a5f1db78e947bea4135976b5db6d87842634f0c4d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/o
penshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edada94eb053469810a747a5d1bf8fe2c2da095e5c944dd2cd5ab7846faf5ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebdc8ed0cab8049a9f6220e6069291fd7a0e6c2b97e0442e3221549cbcc889f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441
ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:18Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.131054 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:18Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.141668 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:18Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.144794 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.144827 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.144837 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.144854 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.144865 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:18Z","lastTransitionTime":"2025-10-02T10:53:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.156404 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dec06359fbdd507aff2ffef8bd1114892cd9e4732b0f21139fd60b5765bc074\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:18Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.194311 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wmn4g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6c8d5bc-163f-401f-bdc5-4625112dced9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ec2f190b5004026dfc36e595ca483d08c1dc07796317d28f9dfd252e391fe7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6gs6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wmn4g\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:18Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.220192 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ks7tf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cbc7f6d-232e-484d-9afc-7111e428762c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160ecc09cabb40cb5867a449f2bb1707da2367f5b10cc4f53276b372b2641bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fj8jd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ks7tf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:18Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.246810 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.246846 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.246855 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.246869 4783 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeNotReady" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.246879 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:18Z","lastTransitionTime":"2025-10-02T10:53:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.348865 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.348910 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.348921 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.348937 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.348950 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:18Z","lastTransitionTime":"2025-10-02T10:53:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.452485 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.452542 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.452566 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.452597 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.452658 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:18Z","lastTransitionTime":"2025-10-02T10:53:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.544449 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.544481 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 02 10:53:18 crc kubenswrapper[4783]: E1002 10:53:18.544657 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.544682 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:53:18 crc kubenswrapper[4783]: E1002 10:53:18.545430 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 02 10:53:18 crc kubenswrapper[4783]: E1002 10:53:18.545299 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.554986 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.555034 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.555054 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.555077 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.555093 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:18Z","lastTransitionTime":"2025-10-02T10:53:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.658197 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.658245 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.658256 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.658273 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.658290 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:18Z","lastTransitionTime":"2025-10-02T10:53:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.760839 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.760900 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.760918 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.760942 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.760980 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:18Z","lastTransitionTime":"2025-10-02T10:53:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.800907 4783 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.863351 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.863436 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.863454 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.863477 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.863494 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:18Z","lastTransitionTime":"2025-10-02T10:53:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.966068 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.966135 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.966152 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.966176 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:18 crc kubenswrapper[4783]: I1002 10:53:18.966193 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:18Z","lastTransitionTime":"2025-10-02T10:53:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:19 crc kubenswrapper[4783]: I1002 10:53:19.069240 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:19 crc kubenswrapper[4783]: I1002 10:53:19.069314 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:19 crc kubenswrapper[4783]: I1002 10:53:19.069339 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:19 crc kubenswrapper[4783]: I1002 10:53:19.069369 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:19 crc kubenswrapper[4783]: I1002 10:53:19.069392 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:19Z","lastTransitionTime":"2025-10-02T10:53:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:19 crc kubenswrapper[4783]: I1002 10:53:19.172038 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:19 crc kubenswrapper[4783]: I1002 10:53:19.172102 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:19 crc kubenswrapper[4783]: I1002 10:53:19.172120 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:19 crc kubenswrapper[4783]: I1002 10:53:19.172146 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:19 crc kubenswrapper[4783]: I1002 10:53:19.172163 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:19Z","lastTransitionTime":"2025-10-02T10:53:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:19 crc kubenswrapper[4783]: I1002 10:53:19.274479 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:19 crc kubenswrapper[4783]: I1002 10:53:19.274544 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:19 crc kubenswrapper[4783]: I1002 10:53:19.274558 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:19 crc kubenswrapper[4783]: I1002 10:53:19.274574 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:19 crc kubenswrapper[4783]: I1002 10:53:19.274586 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:19Z","lastTransitionTime":"2025-10-02T10:53:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:19 crc kubenswrapper[4783]: I1002 10:53:19.377874 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:19 crc kubenswrapper[4783]: I1002 10:53:19.377937 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:19 crc kubenswrapper[4783]: I1002 10:53:19.377954 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:19 crc kubenswrapper[4783]: I1002 10:53:19.377977 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:19 crc kubenswrapper[4783]: I1002 10:53:19.377994 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:19Z","lastTransitionTime":"2025-10-02T10:53:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:19 crc kubenswrapper[4783]: I1002 10:53:19.481132 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:19 crc kubenswrapper[4783]: I1002 10:53:19.481197 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:19 crc kubenswrapper[4783]: I1002 10:53:19.481214 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:19 crc kubenswrapper[4783]: I1002 10:53:19.481241 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:19 crc kubenswrapper[4783]: I1002 10:53:19.481259 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:19Z","lastTransitionTime":"2025-10-02T10:53:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:19 crc kubenswrapper[4783]: I1002 10:53:19.584192 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:19 crc kubenswrapper[4783]: I1002 10:53:19.584250 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:19 crc kubenswrapper[4783]: I1002 10:53:19.584268 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:19 crc kubenswrapper[4783]: I1002 10:53:19.584284 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:19 crc kubenswrapper[4783]: I1002 10:53:19.584294 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:19Z","lastTransitionTime":"2025-10-02T10:53:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:19 crc kubenswrapper[4783]: I1002 10:53:19.687329 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:19 crc kubenswrapper[4783]: I1002 10:53:19.687366 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:19 crc kubenswrapper[4783]: I1002 10:53:19.687377 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:19 crc kubenswrapper[4783]: I1002 10:53:19.687395 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:19 crc kubenswrapper[4783]: I1002 10:53:19.687408 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:19Z","lastTransitionTime":"2025-10-02T10:53:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:19 crc kubenswrapper[4783]: I1002 10:53:19.790462 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:19 crc kubenswrapper[4783]: I1002 10:53:19.790781 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:19 crc kubenswrapper[4783]: I1002 10:53:19.790822 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:19 crc kubenswrapper[4783]: I1002 10:53:19.790848 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:19 crc kubenswrapper[4783]: I1002 10:53:19.790880 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:19Z","lastTransitionTime":"2025-10-02T10:53:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:19 crc kubenswrapper[4783]: I1002 10:53:19.803607 4783 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Oct 02 10:53:19 crc kubenswrapper[4783]: I1002 10:53:19.894516 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:19 crc kubenswrapper[4783]: I1002 10:53:19.894565 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:19 crc kubenswrapper[4783]: I1002 10:53:19.894583 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:19 crc kubenswrapper[4783]: I1002 10:53:19.894609 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:19 crc kubenswrapper[4783]: I1002 10:53:19.894630 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:19Z","lastTransitionTime":"2025-10-02T10:53:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:19 crc kubenswrapper[4783]: I1002 10:53:19.998267 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:19 crc kubenswrapper[4783]: I1002 10:53:19.998313 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:19 crc kubenswrapper[4783]: I1002 10:53:19.998325 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:19 crc kubenswrapper[4783]: I1002 10:53:19.998344 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:19 crc kubenswrapper[4783]: I1002 10:53:19.998355 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:19Z","lastTransitionTime":"2025-10-02T10:53:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Oct 02 10:53:20 crc kubenswrapper[4783]: I1002 10:53:20.100785 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:20 crc kubenswrapper[4783]: I1002 10:53:20.100822 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:20 crc kubenswrapper[4783]: I1002 10:53:20.100833 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:20 crc kubenswrapper[4783]: I1002 10:53:20.100849 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:20 crc kubenswrapper[4783]: I1002 10:53:20.100860 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:20Z","lastTransitionTime":"2025-10-02T10:53:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:20 crc kubenswrapper[4783]: I1002 10:53:20.202847 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:20 crc kubenswrapper[4783]: I1002 10:53:20.202894 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:20 crc kubenswrapper[4783]: I1002 10:53:20.202908 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:20 crc kubenswrapper[4783]: I1002 10:53:20.202927 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:20 crc kubenswrapper[4783]: I1002 10:53:20.202940 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:20Z","lastTransitionTime":"2025-10-02T10:53:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:20 crc kubenswrapper[4783]: I1002 10:53:20.305125 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:20 crc kubenswrapper[4783]: I1002 10:53:20.305167 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:20 crc kubenswrapper[4783]: I1002 10:53:20.305182 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:20 crc kubenswrapper[4783]: I1002 10:53:20.305201 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:20 crc kubenswrapper[4783]: I1002 10:53:20.305215 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:20Z","lastTransitionTime":"2025-10-02T10:53:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:20 crc kubenswrapper[4783]: I1002 10:53:20.407192 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:20 crc kubenswrapper[4783]: I1002 10:53:20.407224 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:20 crc kubenswrapper[4783]: I1002 10:53:20.407234 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:20 crc kubenswrapper[4783]: I1002 10:53:20.407247 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:20 crc kubenswrapper[4783]: I1002 10:53:20.407255 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:20Z","lastTransitionTime":"2025-10-02T10:53:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:20 crc kubenswrapper[4783]: I1002 10:53:20.509726 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:20 crc kubenswrapper[4783]: I1002 10:53:20.509775 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:20 crc kubenswrapper[4783]: I1002 10:53:20.509791 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:20 crc kubenswrapper[4783]: I1002 10:53:20.509815 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:20 crc kubenswrapper[4783]: I1002 10:53:20.509832 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:20Z","lastTransitionTime":"2025-10-02T10:53:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:20 crc kubenswrapper[4783]: I1002 10:53:20.544341 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 02 10:53:20 crc kubenswrapper[4783]: E1002 10:53:20.544504 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 02 10:53:20 crc kubenswrapper[4783]: I1002 10:53:20.544955 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 02 10:53:20 crc kubenswrapper[4783]: E1002 10:53:20.545029 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 02 10:53:20 crc kubenswrapper[4783]: I1002 10:53:20.545087 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 02 10:53:20 crc kubenswrapper[4783]: E1002 10:53:20.545186 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 02 10:53:20 crc kubenswrapper[4783]: I1002 10:53:20.612245 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:20 crc kubenswrapper[4783]: I1002 10:53:20.612294 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:20 crc kubenswrapper[4783]: I1002 10:53:20.612303 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:20 crc kubenswrapper[4783]: I1002 10:53:20.612316 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:20 crc kubenswrapper[4783]: I1002 10:53:20.612325 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:20Z","lastTransitionTime":"2025-10-02T10:53:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:20 crc kubenswrapper[4783]: I1002 10:53:20.715146 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:20 crc kubenswrapper[4783]: I1002 10:53:20.715313 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:20 crc kubenswrapper[4783]: I1002 10:53:20.715322 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:20 crc kubenswrapper[4783]: I1002 10:53:20.715335 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:20 crc kubenswrapper[4783]: I1002 10:53:20.715343 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:20Z","lastTransitionTime":"2025-10-02T10:53:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:20 crc kubenswrapper[4783]: I1002 10:53:20.817633 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:20 crc kubenswrapper[4783]: I1002 10:53:20.817677 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:20 crc kubenswrapper[4783]: I1002 10:53:20.817689 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:20 crc kubenswrapper[4783]: I1002 10:53:20.817708 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:20 crc kubenswrapper[4783]: I1002 10:53:20.817722 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:20Z","lastTransitionTime":"2025-10-02T10:53:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:20 crc kubenswrapper[4783]: I1002 10:53:20.920394 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:20 crc kubenswrapper[4783]: I1002 10:53:20.920458 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:20 crc kubenswrapper[4783]: I1002 10:53:20.920472 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:20 crc kubenswrapper[4783]: I1002 10:53:20.920491 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:20 crc kubenswrapper[4783]: I1002 10:53:20.920502 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:20Z","lastTransitionTime":"2025-10-02T10:53:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.024459 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.024601 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.024618 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.024775 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.024800 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:21Z","lastTransitionTime":"2025-10-02T10:53:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.128785 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.128829 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.128840 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.128877 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.128890 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:21Z","lastTransitionTime":"2025-10-02T10:53:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.231746 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.231793 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.231806 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.231822 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.231834 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:21Z","lastTransitionTime":"2025-10-02T10:53:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.335321 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.335387 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.335451 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.335485 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.335506 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:21Z","lastTransitionTime":"2025-10-02T10:53:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.438360 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.438452 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.438471 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.438496 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.438513 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:21Z","lastTransitionTime":"2025-10-02T10:53:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.541592 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.541700 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.541723 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.541754 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.541780 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:21Z","lastTransitionTime":"2025-10-02T10:53:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.645398 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.645506 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.645536 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.645568 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.645591 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:21Z","lastTransitionTime":"2025-10-02T10:53:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.748560 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.748699 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.748718 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.748744 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.748763 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:21Z","lastTransitionTime":"2025-10-02T10:53:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.815204 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qmd84_095cdcdf-1ea0-40da-871a-1223c6737377/ovnkube-controller/0.log"
Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.820011 4783 generic.go:334] "Generic (PLEG): container finished" podID="095cdcdf-1ea0-40da-871a-1223c6737377" containerID="1c09e85cf525934f3a29fa74aa1d5a8249481ca4df4410b2c46aaec1542fa9c2" exitCode=1
Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.820060 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" event={"ID":"095cdcdf-1ea0-40da-871a-1223c6737377","Type":"ContainerDied","Data":"1c09e85cf525934f3a29fa74aa1d5a8249481ca4df4410b2c46aaec1542fa9c2"}
Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.821297 4783 scope.go:117] "RemoveContainer" containerID="1c09e85cf525934f3a29fa74aa1d5a8249481ca4df4410b2c46aaec1542fa9c2"
Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.843475 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:21Z is after 2025-08-24T17:21:41Z"
Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.851543 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.851604 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.851622 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.851646 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.851664 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:21Z","lastTransitionTime":"2025-10-02T10:53:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.876097 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"095cdcdf-1ea0-40da-871a-1223c6737377\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c09e85cf525934f3a29fa74aa1d5a8249481ca4df4410b2c46aaec1542fa9c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c09e85cf525934f3a29fa74aa1d5a8249481ca4df4410b2c46aaec1542fa9c2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-02T10:53:20Z\\\",\\\"message\\\":\\\"0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1002 10:53:20.683211 5999 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1002 10:53:20.683466 5999 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1002 10:53:20.683540 5999 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1002 10:53:20.683646 5999 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1002 10:53:20.684032 5999 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1002 10:53:20.684054 5999 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1002 10:53:20.684061 5999 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1002 10:53:20.684084 5999 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1002 10:53:20.684119 5999 factory.go:656] Stopping watch factory\\\\nI1002 10:53:20.684136 5999 ovnkube.go:599] Stopped ovnkube\\\\nI1002 10:53:20.684179 5999 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1002 10:53:20.684189 5999 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1002 10:53:20.684196 5999 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1002 1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qmd84\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:21Z is after 2025-08-24T17:21:41Z"
Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.892072 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3288cc82-59a8-408e-8b0e-b5255882b4fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d07b963cb479fc49309f30652bcc2566fd49cd3c7fa53fc79aaf82a9a4b27eab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65d5af548f6048ec95699a1b7175ec8efea96441c335805d704b414eedac00d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2j8rt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:21Z is after 2025-08-24T17:21:41Z"
\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65d5af548f6048ec95699a1b7175ec8efea96441c335805d704b414eedac00d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2j8rt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:21Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.910186 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vxpvq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0781b010-f65f-4e49-9d78-48eda11666fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://feae8da7cb436d7753a9fce729106db7025bd28da1a5d4fb2e67ac07120daac8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-
access-wbk2x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:12Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vxpvq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:21Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.954826 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9d7cf9d-8522-4a62-aaf3-1579864e50f3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b2e414bb5345d3aae7e32d912acf6d7ede59c1f47e1474caf4736663c733d2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8d3ec78a66bfaf15222ed50fe81f042cbf0b9aa5ecc8fb175b0efc8bc1bccbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath
\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a03f92fd7d8f5e7193682a5f1db78e947bea4135976b5db6d87842634f0c4d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edada94eb053469810a747a5d1bf8fe2c2da095e5c944dd2cd5ab7846faf5ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebdc8ed0cab8049a9f6220e6069291fd7a0e6c2b97e0442e3221549cbcc889f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"
}]},{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:21Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.958019 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.958064 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.958081 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.958119 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.958141 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:21Z","lastTransitionTime":"2025-10-02T10:53:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in 
/etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.976838 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2054147b-696c-471a-9602-db747325cae2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d954f0dc3572c180556ccde9a4c429359575e5418fb786fc5df6987f8c326075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e521d7072220c202109a67fe59ebebe0db8bd877d02d9805324922eb3bf45d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb33b77ecaf413bc6e4aeb0f4358b12cd0f3fa6e01602673a518f5651e4bac6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\
"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-02T10:53:07Z\\\",\\\"message\\\":\\\"file observer\\\\nW1002 10:53:07.776642 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1002 10:53:07.776950 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1002 10:53:07.778492 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-420939118/tls.crt::/tmp/serving-cert-420939118/tls.key\\\\\\\"\\\\nI1002 10:53:07.918100 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1002 10:53:07.922528 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1002 10:53:07.922551 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1002 10:53:07.922578 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1002 10:53:07.922585 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1002 10:53:07.930960 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1002 10:53:07.930984 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1002 10:53:07.930999 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931026 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931031 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1002 10:53:07.931036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1002 10:53:07.931039 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1002 10:53:07.931042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1002 10:53:07.933737 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://85188b6d7b5901c957d4ea576d5baab74589a2300f15d6210915fd78f6171348\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:21Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:21 crc kubenswrapper[4783]: I1002 10:53:21.992594 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"acf018d3-a71b-450d-88ef-8f991d5d2219\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf7913b5261e05e8bbfb22a81635ff274a8a28d550d004a5349a9ed84ba5c8b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcc4f647d50346cff07c267ebe645576d032b01e443eaabe4b9aa73b91966e4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdd2ca18244e601967c7e10b1f8f1f02fcf33bfee712d78f7715909c95581b9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0e111ce373594ac0f4bf89b06fd2008eb6554e566575b88e4a4f6beaaa29d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:21Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.006229 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fd4a14eb803afc1b1efecd7c9b8560546554be16de62e8db79752bce61a47dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50292888ad1f8cb9b64dcecfa47015e88b7a560123e50d4a3bf1550b87cfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:22Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.018580 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wmn4g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6c8d5bc-163f-401f-bdc5-4625112dced9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ec2f190b5004026dfc36e595ca483d08c1dc07796317d28f9dfd252e391fe7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/c
ni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6gs6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wmn4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:22Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.031803 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ks7tf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cbc7f6d-232e-484d-9afc-7111e428762c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160ecc09cabb40cb5867a449f2bb1707da2367f5b10cc4f53276b372b2641bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fj8jd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]
}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ks7tf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:22Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.044290 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:22Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.057144 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:22Z is after 2025-08-24T17:21:41Z"
Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.060708 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.060845 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.060928 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.060998 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.061059 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:22Z","lastTransitionTime":"2025-10-02T10:53:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.069119 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dec06359fbdd507aff2ffef8bd1114892cd9e4732b0f21139fd60b5765bc074\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:22Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.080605 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a3fab4917d2aaaba6208c2c5c4dd19b845e8d52e5e350566d8a78cfd581604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:22Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.095872 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jqvp2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"94681624-a0a9-443a-9b4d-715182399740\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b4ea5dd6b4b12458d54c54f155ff89b39e168c234f9e96f04375a0ed60ef09e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jqvp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:22Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.164330 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.164398 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:22 crc 
kubenswrapper[4783]: I1002 10:53:22.164429 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.164452 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.164464 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:22Z","lastTransitionTime":"2025-10-02T10:53:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.267175 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.267219 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.267229 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.267244 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.267252 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:22Z","lastTransitionTime":"2025-10-02T10:53:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.369945 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.369982 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.369991 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.370004 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.370013 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:22Z","lastTransitionTime":"2025-10-02T10:53:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.472948 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.472999 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.473012 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.473040 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.473056 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:22Z","lastTransitionTime":"2025-10-02T10:53:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.540103 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8"]
Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.540579 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8"
Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.542311 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd"
Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.542809 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert"
Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.543721 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.543778 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.543864 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 02 10:53:22 crc kubenswrapper[4783]: E1002 10:53:22.543954 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 02 10:53:22 crc kubenswrapper[4783]: E1002 10:53:22.544047 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 02 10:53:22 crc kubenswrapper[4783]: E1002 10:53:22.544156 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.561676 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jqvp2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94681624-a0a9-443a-9b4d-715182399740\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b4ea5dd6b4b12458d54c54f155ff89b39e168c234f9e96f04375a0ed60ef09e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/
entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79
f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\
\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jqvp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:22Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.575935 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.575983 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.576001 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.576035 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.576050 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:22Z","lastTransitionTime":"2025-10-02T10:53:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.579277 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a3fab4917d2aaaba6208c2c5c4dd19b845e8d52e5e350566d8a78cfd581604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:22Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.599801 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:22Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.603507 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ppwc7\" (UniqueName: \"kubernetes.io/projected/aef16d25-482a-4a0f-91e0-b67cdb92c4ed-kube-api-access-ppwc7\") pod \"ovnkube-control-plane-749d76644c-2q7k8\" (UID: \"aef16d25-482a-4a0f-91e0-b67cdb92c4ed\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.603562 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/aef16d25-482a-4a0f-91e0-b67cdb92c4ed-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-2q7k8\" (UID: \"aef16d25-482a-4a0f-91e0-b67cdb92c4ed\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.603601 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/aef16d25-482a-4a0f-91e0-b67cdb92c4ed-env-overrides\") pod \"ovnkube-control-plane-749d76644c-2q7k8\" (UID: \"aef16d25-482a-4a0f-91e0-b67cdb92c4ed\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.603620 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/aef16d25-482a-4a0f-91e0-b67cdb92c4ed-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-2q7k8\" (UID: \"aef16d25-482a-4a0f-91e0-b67cdb92c4ed\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.621132 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"095cdcdf-1ea0-40da-871a-1223c6737377\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c09e85cf525934f3a29fa74aa1d5a8249481ca4df4410b2c46aaec1542fa9c2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c09e85cf525934f3a29fa74aa1d5a8249481ca4df4410b2c46aaec1542fa9c2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-02T10:53:20Z\\\",\\\"message\\\":\\\"0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1002 10:53:20.683211 5999 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1002 10:53:20.683466 5999 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1002 10:53:20.683540 5999 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1002 10:53:20.683646 5999 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1002 10:53:20.684032 5999 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1002 10:53:20.684054 5999 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1002 10:53:20.684061 5999 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1002 10:53:20.684084 5999 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1002 10:53:20.684119 5999 factory.go:656] Stopping watch factory\\\\nI1002 10:53:20.684136 5999 ovnkube.go:599] Stopped ovnkube\\\\nI1002 10:53:20.684179 5999 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1002 10:53:20.684189 5999 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1002 10:53:20.684196 5999 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1002 
1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d209
9482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qmd84\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:22Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.633271 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aef16d25-482a-4a0f-91e0-b67cdb92c4ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ppwc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ppwc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2q7k8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:22Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.649788 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2054147b-696c-471a-9602-db747325cae2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d954f0dc3572c180556ccde9a4c429359575e5418fb786fc5df6987f8c326075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e521d7072220c202109a67fe59ebebe0db8bd877d02d9805324922eb3bf45d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb33b77ecaf413bc6e4aeb0f4358b12cd0f3fa6e01602673a518f5651e4bac6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-02T10:53:07Z\\\",\\\"message\\\":\\\"file observer\\\\nW1002 10:53:07.776642 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1002 10:53:07.776950 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1002 10:53:07.778492 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-420939118/tls.crt::/tmp/serving-cert-420939118/tls.key\\\\\\\"\\\\nI1002 10:53:07.918100 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1002 10:53:07.922528 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1002 10:53:07.922551 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1002 10:53:07.922578 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1002 10:53:07.922585 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1002 10:53:07.930960 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1002 10:53:07.930984 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1002 10:53:07.930999 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931026 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931031 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1002 10:53:07.931036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1002 10:53:07.931039 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1002 10:53:07.931042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1002 10:53:07.933737 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://85188b6d7b5901c957d4ea576d5baab74589a2300f15d6210915fd78f6171348\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:22Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.661406 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"acf018d3-a71b-450d-88ef-8f991d5d2219\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf7913b5261e05e8bbfb22a81635ff274a8a28d550d004a5349a9ed84ba5c8b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcc4f647d50346cff07c267ebe645576d032b01e443eaabe4b9aa73b91966e4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdd2ca18244e601967c7e10b1f8f1f02fcf33bfee712d78f7715909c95581b9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0e111ce373594ac0f4bf89b06fd2008eb6554e566575b88e4a4f6beaaa29d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:22Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.672555 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fd4a14eb803afc1b1efecd7c9b8560546554be16de62e8db79752bce61a47dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50292888ad1f8cb9b64dcecfa47015e88b7a560123e50d4a3bf1550b87cfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:22Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.678184 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.678219 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.678230 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.678243 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.678252 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:22Z","lastTransitionTime":"2025-10-02T10:53:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.683855 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3288cc82-59a8-408e-8b0e-b5255882b4fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d07b963cb479fc49309f30652bcc2566fd49cd3c7fa53fc79aaf82a9a4b27eab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65d5af548f6048ec95699a1b7175ec8efea96441c335805d704b414eedac00d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2j8rt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:22Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.692444 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vxpvq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0781b010-f65f-4e49-9d78-48eda11666fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://feae8da7cb436d7753a9fce729106db7025bd28da1a5d4fb2e67ac07120daac8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbk2x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:12Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vxpvq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:22Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.704545 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ppwc7\" (UniqueName: \"kubernetes.io/projected/aef16d25-482a-4a0f-91e0-b67cdb92c4ed-kube-api-access-ppwc7\") pod \"ovnkube-control-plane-749d76644c-2q7k8\" (UID: \"aef16d25-482a-4a0f-91e0-b67cdb92c4ed\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.704603 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: 
\"kubernetes.io/configmap/aef16d25-482a-4a0f-91e0-b67cdb92c4ed-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-2q7k8\" (UID: \"aef16d25-482a-4a0f-91e0-b67cdb92c4ed\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.704648 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/aef16d25-482a-4a0f-91e0-b67cdb92c4ed-env-overrides\") pod \"ovnkube-control-plane-749d76644c-2q7k8\" (UID: \"aef16d25-482a-4a0f-91e0-b67cdb92c4ed\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.704679 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/aef16d25-482a-4a0f-91e0-b67cdb92c4ed-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-2q7k8\" (UID: \"aef16d25-482a-4a0f-91e0-b67cdb92c4ed\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.705638 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/aef16d25-482a-4a0f-91e0-b67cdb92c4ed-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-2q7k8\" (UID: \"aef16d25-482a-4a0f-91e0-b67cdb92c4ed\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.705650 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/aef16d25-482a-4a0f-91e0-b67cdb92c4ed-env-overrides\") pod \"ovnkube-control-plane-749d76644c-2q7k8\" (UID: \"aef16d25-482a-4a0f-91e0-b67cdb92c4ed\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.711973 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/aef16d25-482a-4a0f-91e0-b67cdb92c4ed-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-2q7k8\" (UID: \"aef16d25-482a-4a0f-91e0-b67cdb92c4ed\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.722505 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9d7cf9d-8522-4a62-aaf3-1579864e50f3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b2e414bb5345d3aae7e32d912acf6d7ede59c1f47e1474caf4736663c733d2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8d3ec78a66bfaf15222ed50fe81f042cbf0b9aa5ecc8fb175b0efc8bc1bccbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a03f92fd7d8f5e7193682a5f1db78e947bea4135976b5db6d87842634f0c4d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edada94eb053469810a747a5d1bf8fe2c2da09
5e5c944dd2cd5ab7846faf5ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebdc8ed0cab8049a9f6220e6069291fd7a0e6c2b97e0442e3221549cbcc889f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:22Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.730117 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ppwc7\" (UniqueName: \"kubernetes.io/projected/aef16d25-482a-4a0f-91e0-b67cdb92c4ed-kube-api-access-ppwc7\") pod \"ovnkube-control-plane-749d76644c-2q7k8\" (UID: \"aef16d25-482a-4a0f-91e0-b67cdb92c4ed\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.740096 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:22Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.752803 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:22Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.764246 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dec06359fbdd507aff2ffef8bd1114892cd9e4732b0f21139fd60b5765bc074\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:22Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.777319 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wmn4g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6c8d5bc-163f-401f-bdc5-4625112dced9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ec2f190b5004026dfc36e595ca483d08c1dc07796317d28f9dfd252e391fe7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6gs6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wmn4g\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:22Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.783359 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.783405 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.783434 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.783460 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.783473 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:22Z","lastTransitionTime":"2025-10-02T10:53:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.789217 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ks7tf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cbc7f6d-232e-484d-9afc-7111e428762c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160ecc09cabb40cb5867a449f2bb1707da2367f5b10cc4f53276b372b2641bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fj8jd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.16
8.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ks7tf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:22Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.824520 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qmd84_095cdcdf-1ea0-40da-871a-1223c6737377/ovnkube-controller/0.log" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.826869 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" event={"ID":"095cdcdf-1ea0-40da-871a-1223c6737377","Type":"ContainerStarted","Data":"328d776237bc4cad07836e4be5df0419b0b9da5866cdfccbfe91953b920818da"} Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.827076 4783 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.840918 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a3fab4917d2aaaba6208c2c5c4dd19b845e8d52e5e350566d8a78cfd581604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:22Z is 
after 2025-08-24T17:21:41Z" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.851783 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.860116 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jqvp2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94681624-a0a9-443a-9b4d-715182399740\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b4ea5dd6b4b12458d54c54f155ff89b39e168c234f9e96f04375a0ed60ef09e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\
\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T
10:53:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jqvp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: 
current time 2025-10-02T10:53:22Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:22 crc kubenswrapper[4783]: W1002 10:53:22.867828 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podaef16d25_482a_4a0f_91e0_b67cdb92c4ed.slice/crio-05b70a6394def18ff9eb7049f454ce94a5846bb5d7c0e9c88a5f56497ae19005 WatchSource:0}: Error finding container 05b70a6394def18ff9eb7049f454ce94a5846bb5d7c0e9c88a5f56497ae19005: Status 404 returned error can't find the container with id 05b70a6394def18ff9eb7049f454ce94a5846bb5d7c0e9c88a5f56497ae19005 Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.873769 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aef16d25-482a-4a0f-91e0-b67cdb92c4ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ppwc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ppwc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2q7k8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:22Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.885840 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.885880 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.885896 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.885915 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.885927 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:22Z","lastTransitionTime":"2025-10-02T10:53:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.890030 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:22Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.910138 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"095cdcdf-1ea0-40da-871a-1223c6737377\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://328d776237bc4cad07836e4be5df0419b0b9da5866cdfccbfe91953b920818da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c09e85cf525934f3a29fa74aa1d5a8249481ca4df4410b2c46aaec1542fa9c2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-02T10:53:20Z\\\",\\\"message\\\":\\\"0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1002 10:53:20.683211 5999 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1002 10:53:20.683466 5999 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1002 10:53:20.683540 5999 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1002 10:53:20.683646 5999 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1002 10:53:20.684032 5999 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1002 10:53:20.684054 5999 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1002 10:53:20.684061 5999 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1002 10:53:20.684084 5999 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1002 10:53:20.684119 5999 factory.go:656] Stopping watch factory\\\\nI1002 10:53:20.684136 5999 ovnkube.go:599] Stopped ovnkube\\\\nI1002 10:53:20.684179 5999 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1002 10:53:20.684189 5999 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1002 10:53:20.684196 5999 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1002 
1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:17Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"cont
ainerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qmd84\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:22Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.927359 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fd4a14eb803afc1b1efecd7c9b8560546554be16de62e8db79752bce61a47dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50292888ad1f8cb9b64dcecfa47015e88b7a560123e50d4a3bf1550b87cfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:22Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.938899 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3288cc82-59a8-408e-8b0e-b5255882b4fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d07b963cb479fc49309f30652bcc2566fd49cd3c7fa53fc79aaf82a9a4b27eab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65d5af548f6048ec95699a1b7175ec8efea96441c335805d704b414eedac00d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2j8rt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:22Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.951069 4783 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-vxpvq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0781b010-f65f-4e49-9d78-48eda11666fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://feae8da7cb436d7753a9fce729106db7025bd28da1a5d4fb2e67ac07120daac8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbk2x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:12Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vxpvq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:22Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.968220 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9d7cf9d-8522-4a62-aaf3-1579864e50f3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b2e414bb5345d3aae7e32d912acf6d7ede59c1f47e1474caf4736663c733d2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8d3ec78a66bfaf15222ed50fe81f042cbf0b9aa5ecc8fb175b0efc8bc1bccbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a03f92fd7d8f5e7193682a5f1db78e947bea4135976b5db6d87842634f0c4d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edada94eb053469810a747a5d1bf8fe2c2da09
5e5c944dd2cd5ab7846faf5ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebdc8ed0cab8049a9f6220e6069291fd7a0e6c2b97e0442e3221549cbcc889f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:22Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.982015 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2054147b-696c-471a-9602-db747325cae2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d954f0dc3572c180556ccde9a4c429359575e5418fb786fc5df6987f8c326075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e521d7072220c202109a67fe59ebebe0db8bd877d02d9805324922eb3bf45d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb33b77ecaf413bc6e4aeb0f4358b12cd0f3fa6e01602673a518f5651e4bac6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-02T10:53:07Z\\\",\\\"message\\\":\\\"file observer\\\\nW1002 10:53:07.776642 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1002 10:53:07.776950 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1002 10:53:07.778492 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-420939118/tls.crt::/tmp/serving-cert-420939118/tls.key\\\\\\\"\\\\nI1002 10:53:07.918100 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1002 10:53:07.922528 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1002 10:53:07.922551 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1002 10:53:07.922578 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1002 10:53:07.922585 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1002 10:53:07.930960 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1002 10:53:07.930984 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1002 10:53:07.930999 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931026 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931031 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1002 10:53:07.931036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1002 10:53:07.931039 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1002 10:53:07.931042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1002 10:53:07.933737 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://85188b6d7b5901c957d4ea576d5baab74589a2300f15d6210915fd78f6171348\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:22Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.989393 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.989473 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.989491 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.989512 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.989528 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:22Z","lastTransitionTime":"2025-10-02T10:53:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:22 crc kubenswrapper[4783]: I1002 10:53:22.993566 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"acf018d3-a71b-450d-88ef-8f991d5d2219\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf7913b5261e05e8bbfb22a81635ff274a8a28d550d004a5349a9ed84ba5c8b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcc4f647d50346cff07c267ebe645576d032b01e443eaabe4b9aa73b91966e4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdd2ca18244e601967c7e10b1f8f1f02fcf33bfee712d78f7715909c95581b9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49
Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0e111ce373594ac0f4bf89b06fd2008eb6554e566575b88e4a4f6beaaa29d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:22Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.005659 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dec06359fbdd507aff2ffef8bd1114892cd9e4732b0f21139fd60b5765bc074\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:23Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.018321 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wmn4g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6c8d5bc-163f-401f-bdc5-4625112dced9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ec2f190b5004026dfc36e595ca483d08c1dc07796317d28f9dfd252e391fe7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6gs6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wmn4g\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:23Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.027914 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ks7tf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cbc7f6d-232e-484d-9afc-7111e428762c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160ecc09cabb40cb5867a449f2bb1707da2367f5b10cc4f53276b372b2641bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fj8jd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ks7tf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:23Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.040390 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:23Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.053522 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:23Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.091645 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.091690 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.091702 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.091718 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.091732 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:23Z","lastTransitionTime":"2025-10-02T10:53:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.195126 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.195152 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.195164 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.195179 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.195189 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:23Z","lastTransitionTime":"2025-10-02T10:53:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.298308 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.298558 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.298644 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.298749 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.298833 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:23Z","lastTransitionTime":"2025-10-02T10:53:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.400539 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.400568 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.400578 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.400592 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.400602 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:23Z","lastTransitionTime":"2025-10-02T10:53:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.503332 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.503368 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.503378 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.503395 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.503406 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:23Z","lastTransitionTime":"2025-10-02T10:53:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.610481 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.610547 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.610567 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.610595 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.610619 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:23Z","lastTransitionTime":"2025-10-02T10:53:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.714056 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.714117 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.714150 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.714173 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.714192 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:23Z","lastTransitionTime":"2025-10-02T10:53:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.818256 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.818315 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.818333 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.818357 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.818378 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:23Z","lastTransitionTime":"2025-10-02T10:53:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.832641 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qmd84_095cdcdf-1ea0-40da-871a-1223c6737377/ovnkube-controller/1.log" Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.833586 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qmd84_095cdcdf-1ea0-40da-871a-1223c6737377/ovnkube-controller/0.log" Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.838198 4783 generic.go:334] "Generic (PLEG): container finished" podID="095cdcdf-1ea0-40da-871a-1223c6737377" containerID="328d776237bc4cad07836e4be5df0419b0b9da5866cdfccbfe91953b920818da" exitCode=1 Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.838249 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" event={"ID":"095cdcdf-1ea0-40da-871a-1223c6737377","Type":"ContainerDied","Data":"328d776237bc4cad07836e4be5df0419b0b9da5866cdfccbfe91953b920818da"} Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.838334 4783 scope.go:117] "RemoveContainer" containerID="1c09e85cf525934f3a29fa74aa1d5a8249481ca4df4410b2c46aaec1542fa9c2" Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.839468 4783 scope.go:117] "RemoveContainer" containerID="328d776237bc4cad07836e4be5df0419b0b9da5866cdfccbfe91953b920818da" Oct 02 10:53:23 crc kubenswrapper[4783]: E1002 10:53:23.839737 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-qmd84_openshift-ovn-kubernetes(095cdcdf-1ea0-40da-871a-1223c6737377)\"" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.840969 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8" event={"ID":"aef16d25-482a-4a0f-91e0-b67cdb92c4ed","Type":"ContainerStarted","Data":"50f25f7edd0387d10c6125f930f5e278511723be9a92210788ad55b6ec6e956d"} Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.841029 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8" event={"ID":"aef16d25-482a-4a0f-91e0-b67cdb92c4ed","Type":"ContainerStarted","Data":"108796d4fc46b7aa41272bcdc4d1ce172e8cf057bde9f21171ff373264b986cc"} Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.841049 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8" event={"ID":"aef16d25-482a-4a0f-91e0-b67cdb92c4ed","Type":"ContainerStarted","Data":"05b70a6394def18ff9eb7049f454ce94a5846bb5d7c0e9c88a5f56497ae19005"} Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.865222 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a3fab4917d2aaaba6208c2c5c4dd19b845e8d52e5e350566d8a78cfd581604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:23Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.888174 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jqvp2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"94681624-a0a9-443a-9b4d-715182399740\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b4ea5dd6b4b12458d54c54f155ff89b39e168c234f9e96f04375a0ed60ef09e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jqvp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:23Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.903059 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:23Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.922335 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.922378 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.922389 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.922439 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.922456 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:23Z","lastTransitionTime":"2025-10-02T10:53:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.926824 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"095cdcdf-1ea0-40da-871a-1223c6737377\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://328d776237bc4cad07836e4be5df0419b0b9da5866cdfccbfe91953b920818da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c09e85cf525934f3a29fa74aa1d5a8249481ca4df4410b2c46aaec1542fa9c2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-02T10:53:20Z\\\",\\\"message\\\":\\\"0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1002 10:53:20.683211 5999 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1002 10:53:20.683466 5999 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1002 10:53:20.683540 5999 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1002 10:53:20.683646 5999 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1002 10:53:20.684032 5999 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1002 10:53:20.684054 5999 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1002 10:53:20.684061 5999 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1002 10:53:20.684084 5999 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1002 10:53:20.684119 5999 factory.go:656] Stopping watch factory\\\\nI1002 10:53:20.684136 5999 ovnkube.go:599] Stopped ovnkube\\\\nI1002 10:53:20.684179 5999 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1002 10:53:20.684189 5999 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1002 10:53:20.684196 5999 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1002 
1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:17Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://328d776237bc4cad07836e4be5df0419b0b9da5866cdfccbfe91953b920818da\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"message\\\":\\\"d as: [{Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:c94130be-172c-477c-88c4-40cc7eba30fe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1002 10:53:23.050965 6142 model_client.go:382] Update operations generated as: [{Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.4 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {43933d5e-3c3b-4ff8-8926-04ac25de450e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1002 10:53:23.051500 6142 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:c94130be-172c-477c-88c4-40cc7eba30fe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {eb8eef51-1a8d-43f9-ae2e-3b2cc00ded60}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1002 10:53:23.051014 6142 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1002 10:53:23.051591 6142 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d
1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qmd84\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:23Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.942489 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aef16d25-482a-4a0f-91e0-b67cdb92c4ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:22Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ppwc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ppwc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2q7k8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:23Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.978335 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9d7cf9d-8522-4a62-aaf3-1579864e50f3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b2e414bb5345d3aae7e32d912acf6d7ede59c1f47e1474caf4736663c733d2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8d3ec78a66bfaf15222ed50fe81f042cbf0b9aa5ecc8fb175b0efc8bc1bccbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a03f92fd7d8f5e7193682a5f1db78e947bea4135976b5db6d87842634f0c4d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edada94eb053469810a747a5d1bf8fe2c2da09
5e5c944dd2cd5ab7846faf5ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebdc8ed0cab8049a9f6220e6069291fd7a0e6c2b97e0442e3221549cbcc889f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:23Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:23 crc kubenswrapper[4783]: I1002 10:53:23.999333 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2054147b-696c-471a-9602-db747325cae2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d954f0dc3572c180556ccde9a4c429359575e5418fb786fc5df6987f8c326075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e521d7072220c202109a67fe59ebebe0db8bd877d02d9805324922eb3bf45d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb33b77ecaf413bc6e4aeb0f4358b12cd0f3fa6e01602673a518f5651e4bac6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-02T10:53:07Z\\\",\\\"message\\\":\\\"file observer\\\\nW1002 10:53:07.776642 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1002 10:53:07.776950 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1002 10:53:07.778492 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-420939118/tls.crt::/tmp/serving-cert-420939118/tls.key\\\\\\\"\\\\nI1002 10:53:07.918100 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1002 10:53:07.922528 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1002 10:53:07.922551 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1002 10:53:07.922578 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1002 10:53:07.922585 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1002 10:53:07.930960 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1002 10:53:07.930984 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1002 10:53:07.930999 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931026 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931031 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1002 10:53:07.931036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1002 10:53:07.931039 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1002 10:53:07.931042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1002 10:53:07.933737 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://85188b6d7b5901c957d4ea576d5baab74589a2300f15d6210915fd78f6171348\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:23Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.020269 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"acf018d3-a71b-450d-88ef-8f991d5d2219\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf7913b5261e05e8bbfb22a81635ff274a8a28d550d004a5349a9ed84ba5c8b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcc4f647d50346cff07c267ebe645576d032b01e443eaabe4b9aa73b91966e4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdd2ca18244e601967c7e10b1f8f1f02fcf33bfee712d78f7715909c95581b9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0e111ce373594ac0f4bf89b06fd2008eb6554e566575b88e4a4f6beaaa29d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:24Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.025928 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.026005 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.026027 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.026054 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.026073 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:24Z","lastTransitionTime":"2025-10-02T10:53:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.042361 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fd4a14eb803afc1b1efecd7c9b8560546554be16de62e8db79752bce61a47dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50292888ad1f8cb9b64dcecfa47015e88b7a560123e50d4a3bf1550b87cfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:24Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.062634 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3288cc82-59a8-408e-8b0e-b5255882b4fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d07b963cb479fc49309f30652bcc2566fd49cd3c7fa53fc79aaf82a9a4b27eab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65d5af548f6048ec95699a1b7175ec8efea96441c335805d704b414eedac00d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2j8rt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:24Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.085097 4783 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-vxpvq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0781b010-f65f-4e49-9d78-48eda11666fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://feae8da7cb436d7753a9fce729106db7025bd28da1a5d4fb2e67ac07120daac8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbk2x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:12Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vxpvq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:24Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.100845 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:24Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.121809 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:24Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.129132 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.129199 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.129222 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.129253 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.129276 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:24Z","lastTransitionTime":"2025-10-02T10:53:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.141329 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dec06359fbdd507aff2ffef8bd1114892cd9e4732b0f21139fd60b5765bc074\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:24Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.158755 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wmn4g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6c8d5bc-163f-401f-bdc5-4625112dced9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ec2f190b5004026dfc36e595ca483d08c1dc07796317d28f9dfd252e391fe7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6gs6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wmn4g\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:24Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.173660 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ks7tf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cbc7f6d-232e-484d-9afc-7111e428762c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160ecc09cabb40cb5867a449f2bb1707da2367f5b10cc4f53276b372b2641bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fj8jd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ks7tf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:24Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.190542 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aef16d25-482a-4a0f-91e0-b67cdb92c4ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108796d4fc46b7aa41272bcdc4d1ce172e8cf057bde9f21171ff373264b986cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ppwc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50f25f7edd0387d10c6125f930f5e278511723be9a92210788ad55b6ec6e956d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ppwc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2q7k8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:24Z is after 2025-08-24T17:21:41Z" Oct 02 
10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.208462 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:24Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.219662 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.219782 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.219865 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: 
\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:53:24 crc kubenswrapper[4783]: E1002 10:53:24.219980 4783 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Oct 02 10:53:24 crc kubenswrapper[4783]: E1002 10:53:24.220016 4783 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 02 10:53:24 crc kubenswrapper[4783]: E1002 10:53:24.220087 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-02 10:53:40.220062399 +0000 UTC m=+53.536256700 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Oct 02 10:53:24 crc kubenswrapper[4783]: E1002 10:53:24.220116 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-02 10:53:40.22010301 +0000 UTC m=+53.536297301 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 02 10:53:24 crc kubenswrapper[4783]: E1002 10:53:24.220201 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:53:40.220186913 +0000 UTC m=+53.536381204 (durationBeforeRetry 16s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.232145 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.232208 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.232235 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.232266 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.232289 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:24Z","lastTransitionTime":"2025-10-02T10:53:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.235547 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"095cdcdf-1ea0-40da-871a-1223c6737377\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://328d776237bc4cad07836e4be5df0419b0b9da58
66cdfccbfe91953b920818da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c09e85cf525934f3a29fa74aa1d5a8249481ca4df4410b2c46aaec1542fa9c2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-02T10:53:20Z\\\",\\\"message\\\":\\\"0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1002 10:53:20.683211 5999 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1002 10:53:20.683466 5999 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1002 10:53:20.683540 5999 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1002 10:53:20.683646 5999 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1002 10:53:20.684032 5999 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1002 10:53:20.684054 5999 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1002 10:53:20.684061 5999 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1002 10:53:20.684084 5999 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1002 10:53:20.684119 5999 factory.go:656] Stopping watch factory\\\\nI1002 10:53:20.684136 5999 ovnkube.go:599] Stopped ovnkube\\\\nI1002 10:53:20.684179 5999 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1002 10:53:20.684189 5999 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1002 10:53:20.684196 5999 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1002 1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:17Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://328d776237bc4cad07836e4be5df0419b0b9da5866cdfccbfe91953b920818da\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"message\\\":\\\"d as: [{Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:c94130be-172c-477c-88c4-40cc7eba30fe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1002 10:53:23.050965 6142 model_client.go:382] Update operations generated as: [{Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.4 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {43933d5e-3c3b-4ff8-8926-04ac25de450e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1002 10:53:23.051500 6142 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:c94130be-172c-477c-88c4-40cc7eba30fe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {eb8eef51-1a8d-43f9-ae2e-3b2cc00ded60}] Until: Durable:\\\\u003cnil\\\\u003e 
Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1002 10:53:23.051014 6142 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1002 10:53:23.051591 6142 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recur
siveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qmd84\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:24Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.259894 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fd4a14eb803afc1b1efecd7c9b8560546554be16de62e8db79752bce61a47dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50292888ad1f8cb9b64dcecfa47015e88b7a560123e50d4a3bf1550b87cfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:24Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.275359 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3288cc82-59a8-408e-8b0e-b5255882b4fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d07b963cb479fc49309f30652bcc2566fd49cd3c7fa53fc79aaf82a9a4b27eab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65d5af548f6048ec95699a1b7175ec8efea96441c335805d704b414eedac00d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2j8rt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:24Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.290585 4783 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-vxpvq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0781b010-f65f-4e49-9d78-48eda11666fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://feae8da7cb436d7753a9fce729106db7025bd28da1a5d4fb2e67ac07120daac8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbk2x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:12Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vxpvq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:24Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.320622 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.320742 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 02 10:53:24 crc kubenswrapper[4783]: E1002 10:53:24.320948 
4783 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 02 10:53:24 crc kubenswrapper[4783]: E1002 10:53:24.320992 4783 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Oct 02 10:53:24 crc kubenswrapper[4783]: E1002 10:53:24.321000 4783 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 02 10:53:24 crc kubenswrapper[4783]: E1002 10:53:24.321029 4783 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Oct 02 10:53:24 crc kubenswrapper[4783]: E1002 10:53:24.321033 4783 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 02 10:53:24 crc kubenswrapper[4783]: E1002 10:53:24.321056 4783 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 02 10:53:24 crc kubenswrapper[4783]: E1002 10:53:24.321133 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-10-02 10:53:40.321108058 +0000 UTC m=+53.637302359 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 02 10:53:24 crc kubenswrapper[4783]: E1002 10:53:24.321162 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-10-02 10:53:40.32114993 +0000 UTC m=+53.637344231 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.332074 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9d7cf9d-8522-4a62-aaf3-1579864e50f3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b2e414bb5345d3aae7e32d912acf6d7ede59c1f47e1474caf4736663c733d2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8d3ec78a66bfaf15222ed50fe81f042cbf0b9aa5ecc8fb175b0efc8bc1bccbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a03f92fd7d8f5e7193682a5f1db78e947bea4135976b5db6d87842634f0c4d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\
\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edada94eb053469810a747a5d1bf8fe2c2da095e5c944dd2cd5ab7846faf5ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebdc8ed0cab8049a9f6220e6069291fd7a0e6c2b97e0442e3221549cbcc889f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:24Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.335255 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.335334 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.335358 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.335390 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.335462 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:24Z","lastTransitionTime":"2025-10-02T10:53:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.356127 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2054147b-696c-471a-9602-db747325cae2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d954f0dc3572c180556ccde9a4c429359575e5418fb786fc5df6987f8c326075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e521d7072220c202109a67fe59ebebe0db8bd877d02d9805324922eb3bf45d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb33b77ecaf413bc6e4aeb0f4358b12cd0f3fa6e01602673a518f5651e4bac6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-02T10:53:07Z\\\",\\\"message\\\":\\\"file observer\\\\nW1002 10:53:07.776642 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1002 10:53:07.776950 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1002 10:53:07.778492 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-420939118/tls.crt::/tmp/serving-cert-420939118/tls.key\\\\\\\"\\\\nI1002 10:53:07.918100 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1002 10:53:07.922528 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1002 10:53:07.922551 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1002 10:53:07.922578 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1002 10:53:07.922585 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1002 10:53:07.930960 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1002 10:53:07.930984 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1002 10:53:07.930999 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931026 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931031 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1002 10:53:07.931036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1002 10:53:07.931039 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1002 10:53:07.931042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1002 10:53:07.933737 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://85188b6d7b5901c957d4ea576d5baab74589a2300f15d6210915fd78f6171348\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:24Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.379037 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"acf018d3-a71b-450d-88ef-8f991d5d2219\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf7913b5261e05e8bbfb22a81635ff274a8a28d550d004a5349a9ed84ba5c8b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcc4f647d50346cff07c267ebe645576d032b01e443eaabe4b9aa73b91966e4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdd2ca18244e601967c7e10b1f8f1f02fcf33bfee712d78f7715909c95581b9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0e111ce373594ac0f4bf89b06fd2008eb6554e566575b88e4a4f6beaaa29d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:24Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.398103 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dec06359fbdd507aff2ffef8bd1114892cd9e4732b0f21139fd60b5765bc074\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2025-10-02T10:53:24Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.422931 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wmn4g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6c8d5bc-163f-401f-bdc5-4625112dced9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ec2f190b5004026dfc36e595ca483d08c1dc07796317d28f9dfd252e391fe7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6gs6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}
],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wmn4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:24Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.425625 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-6qbg4"] Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.426311 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6qbg4" Oct 02 10:53:24 crc kubenswrapper[4783]: E1002 10:53:24.426407 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6qbg4" podUID="f05f5bf0-b0a7-453b-999b-8ef23ca6cc68" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.438480 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.438542 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.438567 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.438595 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.438615 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:24Z","lastTransitionTime":"2025-10-02T10:53:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.441264 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ks7tf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cbc7f6d-232e-484d-9afc-7111e428762c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160ecc09cabb40cb5867a449f2bb1707da2367f5b10cc4f53276b372b2641bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fj8jd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ks7tf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:24Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.444069 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.444120 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.444142 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.444171 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.444225 4783 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:24Z","lastTransitionTime":"2025-10-02T10:53:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.462343 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:24Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:24 crc kubenswrapper[4783]: E1002 10:53:24.466174 4783 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc187a47-fc71-4069-a609-1fd638044aa7\\\",\\\"systemUUID\\\":\\\"b9763463-2b4a-4924-bf4e-8df5af678b9c\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:24Z is after 
2025-08-24T17:21:41Z" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.471564 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.471604 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.471621 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.471643 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.471660 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:24Z","lastTransitionTime":"2025-10-02T10:53:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.484125 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:24Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:24 crc kubenswrapper[4783]: E1002 10:53:24.491984 4783 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc187a47-fc71-4069-a609-1fd638044aa7\\\",\\\"systemUUID\\\":\\\"b9763463-2b4a-4924-bf4e-8df5af678b9c\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:24Z is after 
2025-08-24T17:21:41Z" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.497227 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.497277 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.497294 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.497316 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.497332 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:24Z","lastTransitionTime":"2025-10-02T10:53:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.507547 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a3fab4917d2aaaba6208c2c5c4dd19b845e8d52e5e350566d8a78cfd581604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:24Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:24 
crc kubenswrapper[4783]: E1002 10:53:24.518755 4783 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider 
started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshif
t-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d
34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc187a47-fc71-4069-a609-1fd638044aa7\\\",\\\"systemUUID\\\":\\\"b9763463-2b4a-4924-bf4e-8df5af678b9c\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:24Z is after 2025-08-24T17:21:41Z" Oct 
02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.522234 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f05f5bf0-b0a7-453b-999b-8ef23ca6cc68-metrics-certs\") pod \"network-metrics-daemon-6qbg4\" (UID: \"f05f5bf0-b0a7-453b-999b-8ef23ca6cc68\") " pod="openshift-multus/network-metrics-daemon-6qbg4" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.522327 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-scffz\" (UniqueName: \"kubernetes.io/projected/f05f5bf0-b0a7-453b-999b-8ef23ca6cc68-kube-api-access-scffz\") pod \"network-metrics-daemon-6qbg4\" (UID: \"f05f5bf0-b0a7-453b-999b-8ef23ca6cc68\") " pod="openshift-multus/network-metrics-daemon-6qbg4" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.523802 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.523864 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.523887 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.523917 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.523940 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:24Z","lastTransitionTime":"2025-10-02T10:53:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.532751 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jqvp2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94681624-a0a9-443a-9b4d-715182399740\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b4ea5dd6b4b12458d54c54f155ff89b39e168c234f9e96f04375a0ed60ef09e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jqvp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:24Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.543837 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.543936 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.543861 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:53:24 crc kubenswrapper[4783]: E1002 10:53:24.544070 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 02 10:53:24 crc kubenswrapper[4783]: E1002 10:53:24.544146 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 02 10:53:24 crc kubenswrapper[4783]: E1002 10:53:24.544264 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 02 10:53:24 crc kubenswrapper[4783]: E1002 10:53:24.545801 4783 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc187a47-fc71-4069-a609-1fd638044aa7\\\",\\\"systemUUID\\\":\\\"b9763463-2b4a-4924-bf4e-8df5af678b9c\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:24Z is after 
2025-08-24T17:21:41Z" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.551189 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.551286 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.551303 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.551323 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.551341 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:24Z","lastTransitionTime":"2025-10-02T10:53:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.555107 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ks7tf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cbc7f6d-232e-484d-9afc-7111e428762c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160ecc09cabb40cb5867a449f2bb1707da2367f5b10cc4f53276b372b2641bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fj8jd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ks7tf\": Internal 
error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:24Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:24 crc kubenswrapper[4783]: E1002 10:53:24.573676 4783 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc187a47-fc71-4069-a609-1fd638044aa7\\\",\\\"systemUUID\\\":\\\"b9763463-2b4a-4924-bf4e-8df5af678b9c\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:24Z is after 
2025-08-24T17:21:41Z" Oct 02 10:53:24 crc kubenswrapper[4783]: E1002 10:53:24.573904 4783 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.575915 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.575965 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.575987 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.576010 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.576028 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:24Z","lastTransitionTime":"2025-10-02T10:53:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.579207 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:24Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.600165 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:24Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.618152 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dec06359fbdd507aff2ffef8bd1114892cd9e4732b0f21139fd60b5765bc074\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:24Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.623256 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-scffz\" (UniqueName: 
\"kubernetes.io/projected/f05f5bf0-b0a7-453b-999b-8ef23ca6cc68-kube-api-access-scffz\") pod \"network-metrics-daemon-6qbg4\" (UID: \"f05f5bf0-b0a7-453b-999b-8ef23ca6cc68\") " pod="openshift-multus/network-metrics-daemon-6qbg4" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.623342 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f05f5bf0-b0a7-453b-999b-8ef23ca6cc68-metrics-certs\") pod \"network-metrics-daemon-6qbg4\" (UID: \"f05f5bf0-b0a7-453b-999b-8ef23ca6cc68\") " pod="openshift-multus/network-metrics-daemon-6qbg4" Oct 02 10:53:24 crc kubenswrapper[4783]: E1002 10:53:24.623530 4783 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Oct 02 10:53:24 crc kubenswrapper[4783]: E1002 10:53:24.623610 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f05f5bf0-b0a7-453b-999b-8ef23ca6cc68-metrics-certs podName:f05f5bf0-b0a7-453b-999b-8ef23ca6cc68 nodeName:}" failed. No retries permitted until 2025-10-02 10:53:25.123591659 +0000 UTC m=+38.439785920 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f05f5bf0-b0a7-453b-999b-8ef23ca6cc68-metrics-certs") pod "network-metrics-daemon-6qbg4" (UID: "f05f5bf0-b0a7-453b-999b-8ef23ca6cc68") : object "openshift-multus"/"metrics-daemon-secret" not registered Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.635817 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wmn4g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6c8d5bc-163f-401f-bdc5-4625112dced9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ec2f190b5004026dfc36e595ca483d08c1dc07796317d28f9dfd252e391fe7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\
\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6gs6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wmn4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:24Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.653648 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-scffz\" (UniqueName: \"kubernetes.io/projected/f05f5bf0-b0a7-453b-999b-8ef23ca6cc68-kube-api-access-scffz\") pod \"network-metrics-daemon-6qbg4\" (UID: \"f05f5bf0-b0a7-453b-999b-8ef23ca6cc68\") " pod="openshift-multus/network-metrics-daemon-6qbg4" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.662496 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a3fab4917d2aaaba6208c2c5c4dd19b845e8d52e5e350566d8a78cfd581604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:24Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.680474 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.680535 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.680554 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.680579 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.680600 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:24Z","lastTransitionTime":"2025-10-02T10:53:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.683753 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jqvp2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94681624-a0a9-443a-9b4d-715182399740\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b4ea5dd6b4b12458d54c54f155ff89b39e168c234f9e96f04375a0ed60ef09e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jqvp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:24Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.713712 4783 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:24Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.749747 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"095cdcdf-1ea0-40da-871a-1223c6737377\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://328d776237bc4cad07836e4be5df0419b0b9da58
66cdfccbfe91953b920818da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c09e85cf525934f3a29fa74aa1d5a8249481ca4df4410b2c46aaec1542fa9c2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-02T10:53:20Z\\\",\\\"message\\\":\\\"0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1002 10:53:20.683211 5999 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1002 10:53:20.683466 5999 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1002 10:53:20.683540 5999 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1002 10:53:20.683646 5999 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1002 10:53:20.684032 5999 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1002 10:53:20.684054 5999 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1002 10:53:20.684061 5999 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1002 10:53:20.684084 5999 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1002 10:53:20.684119 5999 factory.go:656] Stopping watch factory\\\\nI1002 10:53:20.684136 5999 ovnkube.go:599] Stopped ovnkube\\\\nI1002 10:53:20.684179 5999 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1002 10:53:20.684189 5999 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1002 10:53:20.684196 5999 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1002 1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:17Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://328d776237bc4cad07836e4be5df0419b0b9da5866cdfccbfe91953b920818da\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"message\\\":\\\"d as: [{Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:c94130be-172c-477c-88c4-40cc7eba30fe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1002 10:53:23.050965 6142 model_client.go:382] Update operations generated as: [{Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.4 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {43933d5e-3c3b-4ff8-8926-04ac25de450e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1002 10:53:23.051500 6142 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:c94130be-172c-477c-88c4-40cc7eba30fe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {eb8eef51-1a8d-43f9-ae2e-3b2cc00ded60}] Until: Durable:\\\\u003cnil\\\\u003e 
Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1002 10:53:23.051014 6142 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1002 10:53:23.051591 6142 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recur
siveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qmd84\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:24Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.772045 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aef16d25-482a-4a0f-91e0-b67cdb92c4ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108796d4fc46b7aa41272bcdc4d1ce172e8cf057bde9f21171ff373264b986cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ppwc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50f25f7edd0387d10c6125f930f5e278511723be9a92210788ad55b6ec6e956d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ppwc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2q7k8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:24Z is after 2025-08-24T17:21:41Z" Oct 02 
10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.782588 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.782616 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.782625 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.782638 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.782646 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:24Z","lastTransitionTime":"2025-10-02T10:53:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.791147 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6qbg4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f05f5bf0-b0a7-453b-999b-8ef23ca6cc68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-scffz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-scffz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:24Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6qbg4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:24Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.802151 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vxpvq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0781b010-f65f-4e49-9d78-48eda11666fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://feae8da7cb436d7753a9fce729106db7025bd28da1a5d4fb2e67ac07120daac8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbk2x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:12Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vxpvq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:24Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.819846 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9d7cf9d-8522-4a62-aaf3-1579864e50f3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b2e414bb5345d3aae7e32d912acf6d7ede59c1f47e1474caf4736663c733d2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8d3ec78a66bfaf15222ed50fe81f042cbf0b9aa5ecc8fb175b0efc8bc1bccbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a03f92fd7d8f5e7193682a5f1db78e947bea4135976b5db6d87842634f0c4d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edada94eb053469810a747a5d1bf8fe2c2da09
5e5c944dd2cd5ab7846faf5ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebdc8ed0cab8049a9f6220e6069291fd7a0e6c2b97e0442e3221549cbcc889f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:24Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.834302 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2054147b-696c-471a-9602-db747325cae2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d954f0dc3572c180556ccde9a4c429359575e5418fb786fc5df6987f8c326075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e521d7072220c202109a67fe59ebebe0db8bd877d02d9805324922eb3bf45d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb33b77ecaf413bc6e4aeb0f4358b12cd0f3fa6e01602673a518f5651e4bac6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-02T10:53:07Z\\\",\\\"message\\\":\\\"file observer\\\\nW1002 10:53:07.776642 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1002 10:53:07.776950 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1002 10:53:07.778492 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-420939118/tls.crt::/tmp/serving-cert-420939118/tls.key\\\\\\\"\\\\nI1002 10:53:07.918100 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1002 10:53:07.922528 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1002 10:53:07.922551 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1002 10:53:07.922578 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1002 10:53:07.922585 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1002 10:53:07.930960 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1002 10:53:07.930984 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1002 10:53:07.930999 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931026 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931031 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1002 10:53:07.931036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1002 10:53:07.931039 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1002 10:53:07.931042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1002 10:53:07.933737 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=kube-apiserver-check-endpoints 
pod=kube-apiserver-crc_openshift-kube-apiserver(f4b27818a5e8e43d0dc095d08835c792)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://85188b6d7b5901c957d4ea576d5baab74589a2300f15d6210915fd78f6171348\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:24Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.844803 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qmd84_095cdcdf-1ea0-40da-871a-1223c6737377/ovnkube-controller/1.log" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.846453 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"acf018d3-a71b-450d-88ef-8f991d5d2219\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf7913b5261e05e8bbfb22a81635ff274a8a28d550d004a5349a9ed84ba5c8b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcc4f647d50346cff07c267ebe645576d032b01e443eaabe4b9aa73b91966e4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdd2ca18244e601967c7e10b1f8f1f02fcf33bfee712d78f7715909c95581b9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0e111ce373594ac0f4bf89b06fd2008eb6554e566575b88e4a4f6beaaa29d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:24Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.859799 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fd4a14eb803afc1b1efecd7c9b8560546554be16de62e8db79752bce61a47dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50292888ad1f8cb9b64dcecfa47015e88b7a560123e50d4a3bf1550b87cfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:24Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.873659 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3288cc82-59a8-408e-8b0e-b5255882b4fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d07b963cb479fc49309f30652bcc2566fd49cd3c7fa53fc79aaf82a9a4b27eab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65d5af548f6048ec95699a1b7175ec8efea96441c335805d704b414eedac00d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha
256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2j8rt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:24Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.885305 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.885352 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.885366 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.885379 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.885389 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:24Z","lastTransitionTime":"2025-10-02T10:53:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.987841 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.987900 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.987919 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.987946 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:24 crc kubenswrapper[4783]: I1002 10:53:24.987965 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:24Z","lastTransitionTime":"2025-10-02T10:53:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:25 crc kubenswrapper[4783]: I1002 10:53:25.090887 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:25 crc kubenswrapper[4783]: I1002 10:53:25.091284 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:25 crc kubenswrapper[4783]: I1002 10:53:25.091375 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:25 crc kubenswrapper[4783]: I1002 10:53:25.091485 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:25 crc kubenswrapper[4783]: I1002 10:53:25.091568 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:25Z","lastTransitionTime":"2025-10-02T10:53:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:25 crc kubenswrapper[4783]: I1002 10:53:25.129824 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f05f5bf0-b0a7-453b-999b-8ef23ca6cc68-metrics-certs\") pod \"network-metrics-daemon-6qbg4\" (UID: \"f05f5bf0-b0a7-453b-999b-8ef23ca6cc68\") " pod="openshift-multus/network-metrics-daemon-6qbg4" Oct 02 10:53:25 crc kubenswrapper[4783]: E1002 10:53:25.130004 4783 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Oct 02 10:53:25 crc kubenswrapper[4783]: E1002 10:53:25.130065 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f05f5bf0-b0a7-453b-999b-8ef23ca6cc68-metrics-certs podName:f05f5bf0-b0a7-453b-999b-8ef23ca6cc68 nodeName:}" failed. No retries permitted until 2025-10-02 10:53:26.130046586 +0000 UTC m=+39.446240867 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f05f5bf0-b0a7-453b-999b-8ef23ca6cc68-metrics-certs") pod "network-metrics-daemon-6qbg4" (UID: "f05f5bf0-b0a7-453b-999b-8ef23ca6cc68") : object "openshift-multus"/"metrics-daemon-secret" not registered Oct 02 10:53:25 crc kubenswrapper[4783]: I1002 10:53:25.194978 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:25 crc kubenswrapper[4783]: I1002 10:53:25.195058 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:25 crc kubenswrapper[4783]: I1002 10:53:25.195082 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:25 crc kubenswrapper[4783]: I1002 10:53:25.195114 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:25 crc kubenswrapper[4783]: I1002 10:53:25.195138 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:25Z","lastTransitionTime":"2025-10-02T10:53:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:25 crc kubenswrapper[4783]: I1002 10:53:25.298221 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:25 crc kubenswrapper[4783]: I1002 10:53:25.298282 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:25 crc kubenswrapper[4783]: I1002 10:53:25.298299 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:25 crc kubenswrapper[4783]: I1002 10:53:25.298321 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:25 crc kubenswrapper[4783]: I1002 10:53:25.298338 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:25Z","lastTransitionTime":"2025-10-02T10:53:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:25 crc kubenswrapper[4783]: I1002 10:53:25.401344 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:25 crc kubenswrapper[4783]: I1002 10:53:25.401407 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:25 crc kubenswrapper[4783]: I1002 10:53:25.401446 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:25 crc kubenswrapper[4783]: I1002 10:53:25.401472 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:25 crc kubenswrapper[4783]: I1002 10:53:25.401491 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:25Z","lastTransitionTime":"2025-10-02T10:53:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:25 crc kubenswrapper[4783]: I1002 10:53:25.503992 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:25 crc kubenswrapper[4783]: I1002 10:53:25.504043 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:25 crc kubenswrapper[4783]: I1002 10:53:25.504057 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:25 crc kubenswrapper[4783]: I1002 10:53:25.504077 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:25 crc kubenswrapper[4783]: I1002 10:53:25.504089 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:25Z","lastTransitionTime":"2025-10-02T10:53:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:25 crc kubenswrapper[4783]: I1002 10:53:25.606181 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:25 crc kubenswrapper[4783]: I1002 10:53:25.606240 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:25 crc kubenswrapper[4783]: I1002 10:53:25.606257 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:25 crc kubenswrapper[4783]: I1002 10:53:25.606278 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:25 crc kubenswrapper[4783]: I1002 10:53:25.606295 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:25Z","lastTransitionTime":"2025-10-02T10:53:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:25 crc kubenswrapper[4783]: I1002 10:53:25.708543 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:25 crc kubenswrapper[4783]: I1002 10:53:25.708603 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:25 crc kubenswrapper[4783]: I1002 10:53:25.708624 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:25 crc kubenswrapper[4783]: I1002 10:53:25.708650 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:25 crc kubenswrapper[4783]: I1002 10:53:25.708674 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:25Z","lastTransitionTime":"2025-10-02T10:53:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:25 crc kubenswrapper[4783]: I1002 10:53:25.811459 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:25 crc kubenswrapper[4783]: I1002 10:53:25.811515 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:25 crc kubenswrapper[4783]: I1002 10:53:25.811529 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:25 crc kubenswrapper[4783]: I1002 10:53:25.811547 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:25 crc kubenswrapper[4783]: I1002 10:53:25.811562 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:25Z","lastTransitionTime":"2025-10-02T10:53:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:25 crc kubenswrapper[4783]: I1002 10:53:25.913856 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:25 crc kubenswrapper[4783]: I1002 10:53:25.913913 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:25 crc kubenswrapper[4783]: I1002 10:53:25.913930 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:25 crc kubenswrapper[4783]: I1002 10:53:25.913951 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:25 crc kubenswrapper[4783]: I1002 10:53:25.913967 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:25Z","lastTransitionTime":"2025-10-02T10:53:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.017211 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.017255 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.017263 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.017275 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.017284 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:26Z","lastTransitionTime":"2025-10-02T10:53:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.119923 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.119989 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.120011 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.120043 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.120067 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:26Z","lastTransitionTime":"2025-10-02T10:53:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.140597 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f05f5bf0-b0a7-453b-999b-8ef23ca6cc68-metrics-certs\") pod \"network-metrics-daemon-6qbg4\" (UID: \"f05f5bf0-b0a7-453b-999b-8ef23ca6cc68\") " pod="openshift-multus/network-metrics-daemon-6qbg4" Oct 02 10:53:26 crc kubenswrapper[4783]: E1002 10:53:26.140874 4783 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Oct 02 10:53:26 crc kubenswrapper[4783]: E1002 10:53:26.140985 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f05f5bf0-b0a7-453b-999b-8ef23ca6cc68-metrics-certs podName:f05f5bf0-b0a7-453b-999b-8ef23ca6cc68 nodeName:}" failed. No retries permitted until 2025-10-02 10:53:28.140953298 +0000 UTC m=+41.457147609 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f05f5bf0-b0a7-453b-999b-8ef23ca6cc68-metrics-certs") pod "network-metrics-daemon-6qbg4" (UID: "f05f5bf0-b0a7-453b-999b-8ef23ca6cc68") : object "openshift-multus"/"metrics-daemon-secret" not registered Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.223135 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.223198 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.223219 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.223242 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.223260 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:26Z","lastTransitionTime":"2025-10-02T10:53:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.326145 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.326187 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.326199 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.326216 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.326227 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:26Z","lastTransitionTime":"2025-10-02T10:53:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.434380 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.434489 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.434636 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.434667 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.434692 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:26Z","lastTransitionTime":"2025-10-02T10:53:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.538011 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.538069 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.538112 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.538135 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.538152 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:26Z","lastTransitionTime":"2025-10-02T10:53:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.544292 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 02 10:53:26 crc kubenswrapper[4783]: E1002 10:53:26.544513 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.544610 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:53:26 crc kubenswrapper[4783]: E1002 10:53:26.544722 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.544807 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 02 10:53:26 crc kubenswrapper[4783]: E1002 10:53:26.544887 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.545324 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6qbg4" Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.545633 4783 scope.go:117] "RemoveContainer" containerID="ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a" Oct 02 10:53:26 crc kubenswrapper[4783]: E1002 10:53:26.545721 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6qbg4" podUID="f05f5bf0-b0a7-453b-999b-8ef23ca6cc68" Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.640943 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.640986 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.640999 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.641017 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.641030 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:26Z","lastTransitionTime":"2025-10-02T10:53:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.744320 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.744805 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.744818 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.744837 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.744850 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:26Z","lastTransitionTime":"2025-10-02T10:53:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.848936 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.849073 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.849092 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.849118 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.849134 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:26Z","lastTransitionTime":"2025-10-02T10:53:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.855307 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/1.log" Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.858029 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"414652a9b63f678601226313a42b29f5e146de06f854c1f63ed8d408ea9f3553"} Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.859500 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.895305 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9d7cf9d-8522-4a62-aaf3-1579864e50f3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b2e414bb5345d3aae7e32d912acf6d7ede59c1f47e1474caf4736663c733d2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8d3ec78a66bfaf15222ed50fe81f042cbf0b9aa5ecc8fb175b0efc8bc1bccbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod
-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a03f92fd7d8f5e7193682a5f1db78e947bea4135976b5db6d87842634f0c4d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edada94eb053469810a747a5d1bf8fe2c2da095e5c944dd2cd5ab7846faf5ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebdc8ed0cab8049a9f6220e6069291fd7a0e6c2b97e0442e3221549cbcc889f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPat
h\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:26Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.919220 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2054147b-696c-471a-9602-db747325cae2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d954f0dc3572c180556ccde9a4c429359575e5418fb786fc5df6987f8c326075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e521d7072220c202109a67fe59ebebe0db8bd877d02d9805324922eb3bf45d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb33b77ecaf413bc6e4aeb0f4358b12cd0f3fa6e01602673a518f5651e4bac6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://414652a9b63f678601226313a42b29f5e146de06f854c1f63ed8d408ea9f3553\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-02T10:53:07Z\\\",\\\"message\\\":\\\"file observer\\\\nW1002 10:53:07.776642 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1002 10:53:07.776950 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1002 10:53:07.778492 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-420939118/tls.crt::/tmp/serving-cert-420939118/tls.key\\\\\\\"\\\\nI1002 10:53:07.918100 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1002 10:53:07.922528 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1002 10:53:07.922551 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1002 10:53:07.922578 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1002 10:53:07.922585 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1002 10:53:07.930960 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1002 10:53:07.930984 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1002 10:53:07.930999 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931026 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931031 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1002 10:53:07.931036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1002 10:53:07.931039 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1002 10:53:07.931042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1002 10:53:07.933737 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://85188b6d7b5901c957d4ea576d5baab74589a2300f15d6210915fd78f6171348\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:26Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.940337 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"acf018d3-a71b-450d-88ef-8f991d5d2219\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf7913b5261e05e8bbfb22a81635ff274a8a28d550d004a5349a9ed84ba5c8b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcc4f647d50346cff07c267ebe645576d032b01e443eaabe4b9aa73b91966e4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdd2ca18244e601967c7e10b1f8f1f02fcf33bfee712d78f7715909c95581b9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0e111ce373594ac0f4bf89b06fd2008eb6554e566575b88e4a4f6beaaa29d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:26Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.959962 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.960027 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.960046 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.960070 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.960088 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:26Z","lastTransitionTime":"2025-10-02T10:53:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.963693 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fd4a14eb803afc1b1efecd7c9b8560546554be16de62e8db79752bce61a47dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50292888ad1f8cb9b64dcecfa47015e88b7a560123e50d4a3bf1550b87cfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:26Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:26 crc kubenswrapper[4783]: I1002 10:53:26.988499 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3288cc82-59a8-408e-8b0e-b5255882b4fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d07b963cb479fc49309f30652bcc2566fd49cd3c7fa53fc79aaf82a9a4b27eab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65d5af548f6048ec95699a1b7175ec8efea96441c335805d704b414eedac00d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2j8rt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:26Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.005164 4783 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-vxpvq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0781b010-f65f-4e49-9d78-48eda11666fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://feae8da7cb436d7753a9fce729106db7025bd28da1a5d4fb2e67ac07120daac8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbk2x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:12Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vxpvq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:27Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.021242 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:27Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.037822 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:27Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.056394 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dec06359fbdd507aff2ffef8bd1114892cd9e4732b0f21139fd60b5765bc074\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:27Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.062856 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:27 
crc kubenswrapper[4783]: I1002 10:53:27.062924 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.062942 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.062968 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.062987 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:27Z","lastTransitionTime":"2025-10-02T10:53:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.071824 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wmn4g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6c8d5bc-163f-401f-bdc5-4625112dced9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ec2f190b5004026dfc36e595ca483d08c1dc07796317d28f9dfd252e391fe7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\"
,\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6gs6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wmn4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:27Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.088859 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ks7tf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cbc7f6d-232e-484d-9afc-7111e428762c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160ecc09cabb40cb5867a449f2bb1707da2367f5b10cc4f53276b372b2641bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fj8jd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"p
hase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ks7tf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:27Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.104492 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a3fab4917d2aaaba6208c2c5c4dd19b845e8d52e5e350566d8a78cfd581604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:27Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.121939 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jqvp2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"94681624-a0a9-443a-9b4d-715182399740\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b4ea5dd6b4b12458d54c54f155ff89b39e168c234f9e96f04375a0ed60ef09e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jqvp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:27Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.141670 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:27Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.166190 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.166244 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.166262 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.166288 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.166307 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:27Z","lastTransitionTime":"2025-10-02T10:53:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.170039 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"095cdcdf-1ea0-40da-871a-1223c6737377\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://328d776237bc4cad07836e4be5df0419b0b9da5866cdfccbfe91953b920818da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c09e85cf525934f3a29fa74aa1d5a8249481ca4df4410b2c46aaec1542fa9c2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-02T10:53:20Z\\\",\\\"message\\\":\\\"0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1002 10:53:20.683211 5999 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1002 10:53:20.683466 5999 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1002 10:53:20.683540 5999 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1002 10:53:20.683646 5999 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1002 10:53:20.684032 5999 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1002 10:53:20.684054 5999 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1002 10:53:20.684061 5999 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1002 10:53:20.684084 5999 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1002 10:53:20.684119 5999 factory.go:656] Stopping watch factory\\\\nI1002 10:53:20.684136 5999 ovnkube.go:599] Stopped ovnkube\\\\nI1002 10:53:20.684179 5999 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1002 10:53:20.684189 5999 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1002 10:53:20.684196 5999 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1002 
1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:17Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://328d776237bc4cad07836e4be5df0419b0b9da5866cdfccbfe91953b920818da\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"message\\\":\\\"d as: [{Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:c94130be-172c-477c-88c4-40cc7eba30fe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1002 10:53:23.050965 6142 model_client.go:382] Update operations generated as: [{Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.4 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {43933d5e-3c3b-4ff8-8926-04ac25de450e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1002 10:53:23.051500 6142 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:c94130be-172c-477c-88c4-40cc7eba30fe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {eb8eef51-1a8d-43f9-ae2e-3b2cc00ded60}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1002 10:53:23.051014 6142 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1002 10:53:23.051591 6142 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d
1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qmd84\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:27Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.187712 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aef16d25-482a-4a0f-91e0-b67cdb92c4ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108796d4fc46b7aa41272bcdc4d1ce172e8cf057bde9f21171ff373264b986cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ppwc7\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50f25f7edd0387d10c6125f930f5e278511723be9a92210788ad55b6ec6e956d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ppwc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2q7k8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:27Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.204813 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6qbg4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f05f5bf0-b0a7-453b-999b-8ef23ca6cc68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-scffz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-scffz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:24Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6qbg4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:27Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.269132 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.269201 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.269219 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.269635 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.269690 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:27Z","lastTransitionTime":"2025-10-02T10:53:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.371935 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.371995 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.372012 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.372037 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.372057 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:27Z","lastTransitionTime":"2025-10-02T10:53:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.475316 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.475374 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.475392 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.475449 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.475468 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:27Z","lastTransitionTime":"2025-10-02T10:53:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
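Editor's note: the entries above repeat two distinct failures. Every status_manager.go:875 entry fails because the kubelet's pod status patch must pass the "pod.network-node-identity.openshift.io" admission webhook at https://127.0.0.1:9743, whose serving certificate expired 2025-08-24T17:21:41Z, long before the current wall-clock time of 2025-10-02T10:53:27Z; and the node stays NotReady because nothing has written a CNI config into /etc/kubernetes/cni/net.d/ (the ovnkube-controller container that would write it is itself exiting with code 1, per the lastState blocks later in the log). The following is a minimal, stdlib-only Python sketch that reproduces both checks from the node; the endpoint and directory are the ones quoted in the log, while the script itself is illustrative and not part of the cluster.

    #!/usr/bin/env python3
    # triage_node_network.py -- illustrative helper, not shipped with the cluster.
    # Reproduces the two checks failing in the surrounding log, assuming the
    # webhook endpoint (127.0.0.1:9743) and CNI conf dir quoted in the entries.
    import pathlib
    import socket
    import ssl

    WEBHOOK_ADDR = ("127.0.0.1", 9743)                        # from the Post URL above
    CNI_CONF_DIR = pathlib.Path("/etc/kubernetes/cni/net.d")  # from the NetworkReady message

    def check_webhook_cert():
        # A verifying TLS handshake mirrors the kubelet-side x509 check; an
        # expired serving certificate surfaces as SSLCertVerificationError.
        # Load the cluster CA bundle into ctx if you have it; against the
        # system trust store the reported reason may be an unknown issuer
        # rather than expiry.
        ctx = ssl.create_default_context()
        ctx.check_hostname = False  # the kubelet dials a bare IP
        try:
            with socket.create_connection(WEBHOOK_ADDR, timeout=5) as sock:
                with ctx.wrap_socket(sock) as tls:
                    print("webhook handshake OK:", tls.version())
        except ssl.SSLCertVerificationError as err:
            print("webhook certificate rejected:", err.verify_message)
        except OSError as err:
            print("webhook unreachable:", err)

    def check_cni_config():
        # NetworkReady=false above only means this directory holds no
        # .conf/.conflist yet; ovnkube-controller writes one once it stays up.
        if CNI_CONF_DIR.is_dir():
            names = [p.name for p in sorted(CNI_CONF_DIR.glob("*.conf*"))]
        else:
            names = []
        print("CNI config files:", names or "none")

    if __name__ == "__main__":
        check_webhook_cert()
        check_cni_config()

If the handshake check reports expiry, rotating the webhook's serving certificate is the remedy the error text points at; the CNI check should clear on its own once ovnkube-controller stops crash-looping and writes its config.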
Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.563010 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wmn4g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6c8d5bc-163f-401f-bdc5-4625112dced9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ec2f190b5004026dfc36e595ca483d08c1dc07796317d28f9dfd252e391fe7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6gs6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wmn4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:27Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.578365 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.578462 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.578489 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.578523 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.578595 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:27Z","lastTransitionTime":"2025-10-02T10:53:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.580182 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ks7tf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cbc7f6d-232e-484d-9afc-7111e428762c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160ecc09cabb40cb5867a449f2bb1707da2367f5b10cc4f53276b372b2641bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fj8jd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ks7tf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:27Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.598777 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:27Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.623245 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:27Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.642576 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dec06359fbdd507aff2ffef8bd1114892cd9e4732b0f21139fd60b5765bc074\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:27Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.668586 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a3fab4917d2aaaba6208c2c5c4dd19b845e8d52e5e350566d8a78cfd581604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:27Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.681597 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.681648 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.681664 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.681685 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.681702 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:27Z","lastTransitionTime":"2025-10-02T10:53:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.693073 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jqvp2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94681624-a0a9-443a-9b4d-715182399740\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b4ea5dd6b4b12458d54c54f155ff89b39e168c234f9e96f04375a0ed60ef09e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jqvp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:27Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.707820 4783 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-multus/network-metrics-daemon-6qbg4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f05f5bf0-b0a7-453b-999b-8ef23ca6cc68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-scffz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-scffz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:24Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6qbg4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:27Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.730154 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:27Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.755190 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"095cdcdf-1ea0-40da-871a-1223c6737377\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://328d776237bc4cad07836e4be5df0419b0b9da58
66cdfccbfe91953b920818da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c09e85cf525934f3a29fa74aa1d5a8249481ca4df4410b2c46aaec1542fa9c2\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-02T10:53:20Z\\\",\\\"message\\\":\\\"0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1002 10:53:20.683211 5999 reflector.go:311] Stopping reflector *v1.Service (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1002 10:53:20.683466 5999 reflector.go:311] Stopping reflector *v1.Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1002 10:53:20.683540 5999 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1002 10:53:20.683646 5999 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1002 10:53:20.684032 5999 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1002 10:53:20.684054 5999 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1002 10:53:20.684061 5999 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1002 10:53:20.684084 5999 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1002 10:53:20.684119 5999 factory.go:656] Stopping watch factory\\\\nI1002 10:53:20.684136 5999 ovnkube.go:599] Stopped ovnkube\\\\nI1002 10:53:20.684179 5999 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1002 10:53:20.684189 5999 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1002 10:53:20.684196 5999 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1002 1\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:17Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://328d776237bc4cad07836e4be5df0419b0b9da5866cdfccbfe91953b920818da\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"message\\\":\\\"d as: [{Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:c94130be-172c-477c-88c4-40cc7eba30fe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1002 10:53:23.050965 6142 model_client.go:382] Update operations generated as: [{Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.4 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {43933d5e-3c3b-4ff8-8926-04ac25de450e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1002 10:53:23.051500 6142 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:c94130be-172c-477c-88c4-40cc7eba30fe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {eb8eef51-1a8d-43f9-ae2e-3b2cc00ded60}] Until: Durable:\\\\u003cnil\\\\u003e 
Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1002 10:53:23.051014 6142 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1002 10:53:23.051591 6142 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recur
siveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qmd84\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:27Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.767482 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aef16d25-482a-4a0f-91e0-b67cdb92c4ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108796d4fc46b7aa41272bcdc4d1ce172e8cf057bde9f21171ff373264b986cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ppwc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50f25f7edd0387d10c6125f930f5e278511723be9a92210788ad55b6ec6e956d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ppwc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2q7k8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:27Z is after 2025-08-24T17:21:41Z" Oct 02 
10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.780063 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3288cc82-59a8-408e-8b0e-b5255882b4fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d07b963cb479fc49309f30652bcc2566fd49cd3c7fa53fc79aaf82a9a4b27eab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65d5af548f6048ec95699a1b7175ec8efea96441c335805d704b414eedac00d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2j8rt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2025-10-02T10:53:27Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.783877 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.783926 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.783935 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.783947 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.783956 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:27Z","lastTransitionTime":"2025-10-02T10:53:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.790566 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vxpvq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0781b010-f65f-4e49-9d78-48eda11666fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://feae8da7cb436d7753a9fce729106db7025bd28da1a5d4fb2e67ac07120daac8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbk2x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"
podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:12Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vxpvq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:27Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.808102 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9d7cf9d-8522-4a62-aaf3-1579864e50f3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b2e414bb5345d3aae7e32d912acf6d7ede59c1f47e1474caf4736663c733d2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8d3ec78a66bfaf15222ed50fe81f042cbf0b9aa5ecc8fb175b0efc8bc1bccbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a03f92fd7d8f5e7193682a5f1db78e947bea4135976b5db6d87842634f0c4d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019
bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edada94eb053469810a747a5d1bf8fe2c2da095e5c944dd2cd5ab7846faf5ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebdc8ed0cab8049a9f6220e6069291fd7a0e6c2b97e0442e3221549cbcc889f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\
\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:27Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.820522 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2054147b-696c-471a-9602-db747325cae2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d954f0dc3572c180556ccde9a4c429359575e5418fb786fc5df6987f8c326075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e521d7072220c202109a67fe59ebebe0db8bd877d02d9805324922eb3bf45d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb33b77ecaf413bc6e4aeb0f4358b12cd0f3fa6e01602673a518f5651e4bac6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://414652a9b63f678601226313a42b29f5e146de06f854c1f63ed8d408ea9f3553\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-02T10:53:07Z\\\",\\\"message\\\":\\\"file observer\\\\nW1002 10:53:07.776642 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1002 10:53:07.776950 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1002 10:53:07.778492 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-420939118/tls.crt::/tmp/serving-cert-420939118/tls.key\\\\\\\"\\\\nI1002 10:53:07.918100 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1002 10:53:07.922528 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1002 10:53:07.922551 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1002 10:53:07.922578 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1002 10:53:07.922585 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1002 10:53:07.930960 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1002 10:53:07.930984 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1002 10:53:07.930999 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931026 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931031 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1002 10:53:07.931036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1002 10:53:07.931039 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1002 10:53:07.931042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1002 10:53:07.933737 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://85188b6d7b5901c957d4ea576d5baab74589a2300f15d6210915fd78f6171348\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:27Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.832457 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"acf018d3-a71b-450d-88ef-8f991d5d2219\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf7913b5261e05e8bbfb22a81635ff274a8a28d550d004a5349a9ed84ba5c8b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcc4f647d50346cff07c267ebe645576d032b01e443eaabe4b9aa73b91966e4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdd2ca18244e601967c7e10b1f8f1f02fcf33bfee712d78f7715909c95581b9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0e111ce373594ac0f4bf89b06fd2008eb6554e566575b88e4a4f6beaaa29d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:27Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.847546 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fd4a14eb803afc1b1efecd7c9b8560546554be16de62e8db79752bce61a47dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50292888ad1f8cb9b64dcecfa47015e88b7a560123e50d4a3bf1550b87cfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:27Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.885490 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.885518 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.885527 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.885538 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.885570 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:27Z","lastTransitionTime":"2025-10-02T10:53:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.988111 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.988155 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.988166 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.988184 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:27 crc kubenswrapper[4783]: I1002 10:53:27.988196 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:27Z","lastTransitionTime":"2025-10-02T10:53:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:28 crc kubenswrapper[4783]: I1002 10:53:28.091213 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:28 crc kubenswrapper[4783]: I1002 10:53:28.091276 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:28 crc kubenswrapper[4783]: I1002 10:53:28.091293 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:28 crc kubenswrapper[4783]: I1002 10:53:28.091315 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:28 crc kubenswrapper[4783]: I1002 10:53:28.091334 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:28Z","lastTransitionTime":"2025-10-02T10:53:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:28 crc kubenswrapper[4783]: I1002 10:53:28.162971 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f05f5bf0-b0a7-453b-999b-8ef23ca6cc68-metrics-certs\") pod \"network-metrics-daemon-6qbg4\" (UID: \"f05f5bf0-b0a7-453b-999b-8ef23ca6cc68\") " pod="openshift-multus/network-metrics-daemon-6qbg4" Oct 02 10:53:28 crc kubenswrapper[4783]: E1002 10:53:28.163267 4783 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Oct 02 10:53:28 crc kubenswrapper[4783]: E1002 10:53:28.163457 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f05f5bf0-b0a7-453b-999b-8ef23ca6cc68-metrics-certs podName:f05f5bf0-b0a7-453b-999b-8ef23ca6cc68 nodeName:}" failed. No retries permitted until 2025-10-02 10:53:32.163384818 +0000 UTC m=+45.479579149 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f05f5bf0-b0a7-453b-999b-8ef23ca6cc68-metrics-certs") pod "network-metrics-daemon-6qbg4" (UID: "f05f5bf0-b0a7-453b-999b-8ef23ca6cc68") : object "openshift-multus"/"metrics-daemon-secret" not registered Oct 02 10:53:28 crc kubenswrapper[4783]: I1002 10:53:28.195015 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:28 crc kubenswrapper[4783]: I1002 10:53:28.195089 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:28 crc kubenswrapper[4783]: I1002 10:53:28.195107 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:28 crc kubenswrapper[4783]: I1002 10:53:28.195541 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:28 crc kubenswrapper[4783]: I1002 10:53:28.195601 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:28Z","lastTransitionTime":"2025-10-02T10:53:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:28 crc kubenswrapper[4783]: I1002 10:53:28.298922 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:28 crc kubenswrapper[4783]: I1002 10:53:28.298987 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:28 crc kubenswrapper[4783]: I1002 10:53:28.299003 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:28 crc kubenswrapper[4783]: I1002 10:53:28.299027 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:28 crc kubenswrapper[4783]: I1002 10:53:28.299042 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:28Z","lastTransitionTime":"2025-10-02T10:53:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:28 crc kubenswrapper[4783]: I1002 10:53:28.401567 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:28 crc kubenswrapper[4783]: I1002 10:53:28.401610 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:28 crc kubenswrapper[4783]: I1002 10:53:28.401622 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:28 crc kubenswrapper[4783]: I1002 10:53:28.401638 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:28 crc kubenswrapper[4783]: I1002 10:53:28.401649 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:28Z","lastTransitionTime":"2025-10-02T10:53:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:28 crc kubenswrapper[4783]: I1002 10:53:28.504579 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:28 crc kubenswrapper[4783]: I1002 10:53:28.504666 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:28 crc kubenswrapper[4783]: I1002 10:53:28.504695 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:28 crc kubenswrapper[4783]: I1002 10:53:28.504730 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:28 crc kubenswrapper[4783]: I1002 10:53:28.504754 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:28Z","lastTransitionTime":"2025-10-02T10:53:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:28 crc kubenswrapper[4783]: I1002 10:53:28.544159 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 02 10:53:28 crc kubenswrapper[4783]: I1002 10:53:28.544257 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:53:28 crc kubenswrapper[4783]: I1002 10:53:28.544273 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6qbg4" Oct 02 10:53:28 crc kubenswrapper[4783]: E1002 10:53:28.544355 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 02 10:53:28 crc kubenswrapper[4783]: E1002 10:53:28.544573 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 02 10:53:28 crc kubenswrapper[4783]: I1002 10:53:28.544629 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 02 10:53:28 crc kubenswrapper[4783]: E1002 10:53:28.544701 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6qbg4" podUID="f05f5bf0-b0a7-453b-999b-8ef23ca6cc68" Oct 02 10:53:28 crc kubenswrapper[4783]: E1002 10:53:28.544775 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 02 10:53:28 crc kubenswrapper[4783]: I1002 10:53:28.611201 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:28 crc kubenswrapper[4783]: I1002 10:53:28.611264 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:28 crc kubenswrapper[4783]: I1002 10:53:28.611282 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:28 crc kubenswrapper[4783]: I1002 10:53:28.611304 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:28 crc kubenswrapper[4783]: I1002 10:53:28.611321 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:28Z","lastTransitionTime":"2025-10-02T10:53:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:28 crc kubenswrapper[4783]: I1002 10:53:28.714814 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:28 crc kubenswrapper[4783]: I1002 10:53:28.714878 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:28 crc kubenswrapper[4783]: I1002 10:53:28.714895 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:28 crc kubenswrapper[4783]: I1002 10:53:28.714919 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:28 crc kubenswrapper[4783]: I1002 10:53:28.714936 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:28Z","lastTransitionTime":"2025-10-02T10:53:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:28 crc kubenswrapper[4783]: I1002 10:53:28.817382 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:28 crc kubenswrapper[4783]: I1002 10:53:28.817516 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:28 crc kubenswrapper[4783]: I1002 10:53:28.817566 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:28 crc kubenswrapper[4783]: I1002 10:53:28.817592 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:28 crc kubenswrapper[4783]: I1002 10:53:28.817610 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:28Z","lastTransitionTime":"2025-10-02T10:53:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:28 crc kubenswrapper[4783]: I1002 10:53:28.920814 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:28 crc kubenswrapper[4783]: I1002 10:53:28.920895 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:28 crc kubenswrapper[4783]: I1002 10:53:28.920917 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:28 crc kubenswrapper[4783]: I1002 10:53:28.920946 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:28 crc kubenswrapper[4783]: I1002 10:53:28.920968 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:28Z","lastTransitionTime":"2025-10-02T10:53:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:29 crc kubenswrapper[4783]: I1002 10:53:29.024083 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:29 crc kubenswrapper[4783]: I1002 10:53:29.024153 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:29 crc kubenswrapper[4783]: I1002 10:53:29.024179 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:29 crc kubenswrapper[4783]: I1002 10:53:29.024209 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:29 crc kubenswrapper[4783]: I1002 10:53:29.024230 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:29Z","lastTransitionTime":"2025-10-02T10:53:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:29 crc kubenswrapper[4783]: I1002 10:53:29.128266 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:29 crc kubenswrapper[4783]: I1002 10:53:29.128333 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:29 crc kubenswrapper[4783]: I1002 10:53:29.128354 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:29 crc kubenswrapper[4783]: I1002 10:53:29.128382 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:29 crc kubenswrapper[4783]: I1002 10:53:29.128405 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:29Z","lastTransitionTime":"2025-10-02T10:53:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:29 crc kubenswrapper[4783]: I1002 10:53:29.231544 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:29 crc kubenswrapper[4783]: I1002 10:53:29.231634 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:29 crc kubenswrapper[4783]: I1002 10:53:29.231652 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:29 crc kubenswrapper[4783]: I1002 10:53:29.231674 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:29 crc kubenswrapper[4783]: I1002 10:53:29.231692 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:29Z","lastTransitionTime":"2025-10-02T10:53:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:29 crc kubenswrapper[4783]: I1002 10:53:29.334488 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:29 crc kubenswrapper[4783]: I1002 10:53:29.334580 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:29 crc kubenswrapper[4783]: I1002 10:53:29.334605 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:29 crc kubenswrapper[4783]: I1002 10:53:29.334638 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:29 crc kubenswrapper[4783]: I1002 10:53:29.334675 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:29Z","lastTransitionTime":"2025-10-02T10:53:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:29 crc kubenswrapper[4783]: I1002 10:53:29.437598 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:29 crc kubenswrapper[4783]: I1002 10:53:29.437714 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:29 crc kubenswrapper[4783]: I1002 10:53:29.437737 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:29 crc kubenswrapper[4783]: I1002 10:53:29.437765 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:29 crc kubenswrapper[4783]: I1002 10:53:29.437791 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:29Z","lastTransitionTime":"2025-10-02T10:53:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:29 crc kubenswrapper[4783]: I1002 10:53:29.541142 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:29 crc kubenswrapper[4783]: I1002 10:53:29.541195 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:29 crc kubenswrapper[4783]: I1002 10:53:29.541217 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:29 crc kubenswrapper[4783]: I1002 10:53:29.541245 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:29 crc kubenswrapper[4783]: I1002 10:53:29.541269 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:29Z","lastTransitionTime":"2025-10-02T10:53:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:29 crc kubenswrapper[4783]: I1002 10:53:29.644374 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:29 crc kubenswrapper[4783]: I1002 10:53:29.644442 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:29 crc kubenswrapper[4783]: I1002 10:53:29.644454 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:29 crc kubenswrapper[4783]: I1002 10:53:29.644470 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:29 crc kubenswrapper[4783]: I1002 10:53:29.644481 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:29Z","lastTransitionTime":"2025-10-02T10:53:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:29 crc kubenswrapper[4783]: I1002 10:53:29.746783 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:29 crc kubenswrapper[4783]: I1002 10:53:29.746846 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:29 crc kubenswrapper[4783]: I1002 10:53:29.746863 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:29 crc kubenswrapper[4783]: I1002 10:53:29.746885 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:29 crc kubenswrapper[4783]: I1002 10:53:29.746928 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:29Z","lastTransitionTime":"2025-10-02T10:53:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:29 crc kubenswrapper[4783]: I1002 10:53:29.849601 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:29 crc kubenswrapper[4783]: I1002 10:53:29.849664 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:29 crc kubenswrapper[4783]: I1002 10:53:29.849687 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:29 crc kubenswrapper[4783]: I1002 10:53:29.849715 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:29 crc kubenswrapper[4783]: I1002 10:53:29.849744 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:29Z","lastTransitionTime":"2025-10-02T10:53:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:29 crc kubenswrapper[4783]: I1002 10:53:29.951967 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:29 crc kubenswrapper[4783]: I1002 10:53:29.952340 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:29 crc kubenswrapper[4783]: I1002 10:53:29.952543 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:29 crc kubenswrapper[4783]: I1002 10:53:29.952718 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:29 crc kubenswrapper[4783]: I1002 10:53:29.952857 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:29Z","lastTransitionTime":"2025-10-02T10:53:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.056198 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.056277 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.056300 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.056329 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.056353 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:30Z","lastTransitionTime":"2025-10-02T10:53:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.159770 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.159852 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.159877 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.159909 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.159933 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:30Z","lastTransitionTime":"2025-10-02T10:53:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.262992 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.263078 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.263104 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.263135 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.263156 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:30Z","lastTransitionTime":"2025-10-02T10:53:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.366509 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.366565 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.366582 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.366607 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.366624 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:30Z","lastTransitionTime":"2025-10-02T10:53:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.469714 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.469840 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.469864 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.469891 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.469918 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:30Z","lastTransitionTime":"2025-10-02T10:53:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.544357 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.544469 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6qbg4" Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.544389 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.544535 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 02 10:53:30 crc kubenswrapper[4783]: E1002 10:53:30.544638 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6qbg4" podUID="f05f5bf0-b0a7-453b-999b-8ef23ca6cc68" Oct 02 10:53:30 crc kubenswrapper[4783]: E1002 10:53:30.544931 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 02 10:53:30 crc kubenswrapper[4783]: E1002 10:53:30.545071 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 02 10:53:30 crc kubenswrapper[4783]: E1002 10:53:30.545158 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.573080 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.573164 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.573184 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.573233 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.573253 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:30Z","lastTransitionTime":"2025-10-02T10:53:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.676180 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.676627 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.676777 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.676981 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.677156 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:30Z","lastTransitionTime":"2025-10-02T10:53:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.780387 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.780747 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.780960 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.781130 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.781331 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:30Z","lastTransitionTime":"2025-10-02T10:53:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.884262 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.884355 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.884374 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.884397 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.884437 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:30Z","lastTransitionTime":"2025-10-02T10:53:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.988074 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.988140 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.988158 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.988183 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:30 crc kubenswrapper[4783]: I1002 10:53:30.988202 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:30Z","lastTransitionTime":"2025-10-02T10:53:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:31 crc kubenswrapper[4783]: I1002 10:53:31.091275 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:31 crc kubenswrapper[4783]: I1002 10:53:31.091341 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:31 crc kubenswrapper[4783]: I1002 10:53:31.091380 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:31 crc kubenswrapper[4783]: I1002 10:53:31.091456 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:31 crc kubenswrapper[4783]: I1002 10:53:31.091481 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:31Z","lastTransitionTime":"2025-10-02T10:53:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:31 crc kubenswrapper[4783]: I1002 10:53:31.194762 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:31 crc kubenswrapper[4783]: I1002 10:53:31.194834 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:31 crc kubenswrapper[4783]: I1002 10:53:31.194851 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:31 crc kubenswrapper[4783]: I1002 10:53:31.194876 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:31 crc kubenswrapper[4783]: I1002 10:53:31.194892 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:31Z","lastTransitionTime":"2025-10-02T10:53:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:31 crc kubenswrapper[4783]: I1002 10:53:31.297326 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:31 crc kubenswrapper[4783]: I1002 10:53:31.297386 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:31 crc kubenswrapper[4783]: I1002 10:53:31.297409 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:31 crc kubenswrapper[4783]: I1002 10:53:31.297514 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:31 crc kubenswrapper[4783]: I1002 10:53:31.297539 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:31Z","lastTransitionTime":"2025-10-02T10:53:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:31 crc kubenswrapper[4783]: I1002 10:53:31.400926 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:31 crc kubenswrapper[4783]: I1002 10:53:31.400976 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:31 crc kubenswrapper[4783]: I1002 10:53:31.400989 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:31 crc kubenswrapper[4783]: I1002 10:53:31.401008 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:31 crc kubenswrapper[4783]: I1002 10:53:31.401022 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:31Z","lastTransitionTime":"2025-10-02T10:53:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:31 crc kubenswrapper[4783]: I1002 10:53:31.504555 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:31 crc kubenswrapper[4783]: I1002 10:53:31.504633 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:31 crc kubenswrapper[4783]: I1002 10:53:31.504656 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:31 crc kubenswrapper[4783]: I1002 10:53:31.504691 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:31 crc kubenswrapper[4783]: I1002 10:53:31.504713 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:31Z","lastTransitionTime":"2025-10-02T10:53:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:31 crc kubenswrapper[4783]: I1002 10:53:31.607627 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:31 crc kubenswrapper[4783]: I1002 10:53:31.607695 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:31 crc kubenswrapper[4783]: I1002 10:53:31.607719 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:31 crc kubenswrapper[4783]: I1002 10:53:31.607749 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:31 crc kubenswrapper[4783]: I1002 10:53:31.607788 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:31Z","lastTransitionTime":"2025-10-02T10:53:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:31 crc kubenswrapper[4783]: I1002 10:53:31.710299 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:31 crc kubenswrapper[4783]: I1002 10:53:31.710352 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:31 crc kubenswrapper[4783]: I1002 10:53:31.710368 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:31 crc kubenswrapper[4783]: I1002 10:53:31.710388 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:31 crc kubenswrapper[4783]: I1002 10:53:31.710404 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:31Z","lastTransitionTime":"2025-10-02T10:53:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:31 crc kubenswrapper[4783]: I1002 10:53:31.813632 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:31 crc kubenswrapper[4783]: I1002 10:53:31.813686 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:31 crc kubenswrapper[4783]: I1002 10:53:31.813704 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:31 crc kubenswrapper[4783]: I1002 10:53:31.813727 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:31 crc kubenswrapper[4783]: I1002 10:53:31.813743 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:31Z","lastTransitionTime":"2025-10-02T10:53:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:31 crc kubenswrapper[4783]: I1002 10:53:31.917383 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:31 crc kubenswrapper[4783]: I1002 10:53:31.917477 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:31 crc kubenswrapper[4783]: I1002 10:53:31.917497 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:31 crc kubenswrapper[4783]: I1002 10:53:31.917519 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:31 crc kubenswrapper[4783]: I1002 10:53:31.917538 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:31Z","lastTransitionTime":"2025-10-02T10:53:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.020783 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.020853 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.020870 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.020894 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.020915 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:32Z","lastTransitionTime":"2025-10-02T10:53:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.123973 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.124038 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.124056 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.124083 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.124100 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:32Z","lastTransitionTime":"2025-10-02T10:53:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.213132 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f05f5bf0-b0a7-453b-999b-8ef23ca6cc68-metrics-certs\") pod \"network-metrics-daemon-6qbg4\" (UID: \"f05f5bf0-b0a7-453b-999b-8ef23ca6cc68\") " pod="openshift-multus/network-metrics-daemon-6qbg4" Oct 02 10:53:32 crc kubenswrapper[4783]: E1002 10:53:32.213339 4783 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Oct 02 10:53:32 crc kubenswrapper[4783]: E1002 10:53:32.213495 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f05f5bf0-b0a7-453b-999b-8ef23ca6cc68-metrics-certs podName:f05f5bf0-b0a7-453b-999b-8ef23ca6cc68 nodeName:}" failed. No retries permitted until 2025-10-02 10:53:40.21346145 +0000 UTC m=+53.529655751 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f05f5bf0-b0a7-453b-999b-8ef23ca6cc68-metrics-certs") pod "network-metrics-daemon-6qbg4" (UID: "f05f5bf0-b0a7-453b-999b-8ef23ca6cc68") : object "openshift-multus"/"metrics-daemon-secret" not registered Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.226494 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.226556 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.226573 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.226596 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.226613 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:32Z","lastTransitionTime":"2025-10-02T10:53:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.329268 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.329315 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.329326 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.329343 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.329355 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:32Z","lastTransitionTime":"2025-10-02T10:53:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.432940 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.433009 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.433031 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.433055 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.433072 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:32Z","lastTransitionTime":"2025-10-02T10:53:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.535700 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.535738 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.535747 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.535762 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.535771 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:32Z","lastTransitionTime":"2025-10-02T10:53:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.544240 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6qbg4" Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.544251 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.544262 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 02 10:53:32 crc kubenswrapper[4783]: E1002 10:53:32.544337 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-6qbg4" podUID="f05f5bf0-b0a7-453b-999b-8ef23ca6cc68" Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.544353 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 02 10:53:32 crc kubenswrapper[4783]: E1002 10:53:32.544472 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 02 10:53:32 crc kubenswrapper[4783]: E1002 10:53:32.544786 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 02 10:53:32 crc kubenswrapper[4783]: E1002 10:53:32.544836 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.638629 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.638663 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.638673 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.638688 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.638698 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:32Z","lastTransitionTime":"2025-10-02T10:53:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.742104 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.742152 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.742165 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.742183 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.742259 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:32Z","lastTransitionTime":"2025-10-02T10:53:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.845259 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.845551 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.845566 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.845582 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.845594 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:32Z","lastTransitionTime":"2025-10-02T10:53:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.948464 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.948528 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.948548 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.948575 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:32 crc kubenswrapper[4783]: I1002 10:53:32.948595 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:32Z","lastTransitionTime":"2025-10-02T10:53:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:33 crc kubenswrapper[4783]: I1002 10:53:33.051209 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:33 crc kubenswrapper[4783]: I1002 10:53:33.051297 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:33 crc kubenswrapper[4783]: I1002 10:53:33.051316 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:33 crc kubenswrapper[4783]: I1002 10:53:33.051342 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:33 crc kubenswrapper[4783]: I1002 10:53:33.051359 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:33Z","lastTransitionTime":"2025-10-02T10:53:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:33 crc kubenswrapper[4783]: I1002 10:53:33.155141 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:33 crc kubenswrapper[4783]: I1002 10:53:33.155192 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:33 crc kubenswrapper[4783]: I1002 10:53:33.155209 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:33 crc kubenswrapper[4783]: I1002 10:53:33.155232 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:33 crc kubenswrapper[4783]: I1002 10:53:33.155249 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:33Z","lastTransitionTime":"2025-10-02T10:53:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:33 crc kubenswrapper[4783]: I1002 10:53:33.258033 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:33 crc kubenswrapper[4783]: I1002 10:53:33.258082 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:33 crc kubenswrapper[4783]: I1002 10:53:33.258093 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:33 crc kubenswrapper[4783]: I1002 10:53:33.258111 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:33 crc kubenswrapper[4783]: I1002 10:53:33.258124 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:33Z","lastTransitionTime":"2025-10-02T10:53:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:33 crc kubenswrapper[4783]: I1002 10:53:33.364497 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:33 crc kubenswrapper[4783]: I1002 10:53:33.364575 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:33 crc kubenswrapper[4783]: I1002 10:53:33.364592 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:33 crc kubenswrapper[4783]: I1002 10:53:33.364615 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:33 crc kubenswrapper[4783]: I1002 10:53:33.364666 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:33Z","lastTransitionTime":"2025-10-02T10:53:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:33 crc kubenswrapper[4783]: I1002 10:53:33.468244 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:33 crc kubenswrapper[4783]: I1002 10:53:33.468296 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:33 crc kubenswrapper[4783]: I1002 10:53:33.468314 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:33 crc kubenswrapper[4783]: I1002 10:53:33.468340 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:33 crc kubenswrapper[4783]: I1002 10:53:33.468362 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:33Z","lastTransitionTime":"2025-10-02T10:53:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:33 crc kubenswrapper[4783]: I1002 10:53:33.571203 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:33 crc kubenswrapper[4783]: I1002 10:53:33.571251 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:33 crc kubenswrapper[4783]: I1002 10:53:33.571263 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:33 crc kubenswrapper[4783]: I1002 10:53:33.571279 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:33 crc kubenswrapper[4783]: I1002 10:53:33.571291 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:33Z","lastTransitionTime":"2025-10-02T10:53:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:33 crc kubenswrapper[4783]: I1002 10:53:33.675340 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:33 crc kubenswrapper[4783]: I1002 10:53:33.675463 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:33 crc kubenswrapper[4783]: I1002 10:53:33.675485 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:33 crc kubenswrapper[4783]: I1002 10:53:33.675510 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:33 crc kubenswrapper[4783]: I1002 10:53:33.675528 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:33Z","lastTransitionTime":"2025-10-02T10:53:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:33 crc kubenswrapper[4783]: I1002 10:53:33.779062 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:33 crc kubenswrapper[4783]: I1002 10:53:33.779135 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:33 crc kubenswrapper[4783]: I1002 10:53:33.779152 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:33 crc kubenswrapper[4783]: I1002 10:53:33.779177 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:33 crc kubenswrapper[4783]: I1002 10:53:33.779194 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:33Z","lastTransitionTime":"2025-10-02T10:53:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:33 crc kubenswrapper[4783]: I1002 10:53:33.882338 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:33 crc kubenswrapper[4783]: I1002 10:53:33.882971 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:33 crc kubenswrapper[4783]: I1002 10:53:33.883206 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:33 crc kubenswrapper[4783]: I1002 10:53:33.883442 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:33 crc kubenswrapper[4783]: I1002 10:53:33.883650 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:33Z","lastTransitionTime":"2025-10-02T10:53:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:33 crc kubenswrapper[4783]: I1002 10:53:33.987049 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:33 crc kubenswrapper[4783]: I1002 10:53:33.987291 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:33 crc kubenswrapper[4783]: I1002 10:53:33.987346 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:33 crc kubenswrapper[4783]: I1002 10:53:33.987369 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:33 crc kubenswrapper[4783]: I1002 10:53:33.987387 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:33Z","lastTransitionTime":"2025-10-02T10:53:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.091099 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.091164 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.091183 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.091207 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.091227 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:34Z","lastTransitionTime":"2025-10-02T10:53:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.194116 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.194285 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.194315 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.194343 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.194365 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:34Z","lastTransitionTime":"2025-10-02T10:53:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.297239 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.297287 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.297297 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.297313 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.297324 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:34Z","lastTransitionTime":"2025-10-02T10:53:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.400806 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.400869 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.400886 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.400911 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.400928 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:34Z","lastTransitionTime":"2025-10-02T10:53:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.504017 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.504341 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.504531 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.504683 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.504853 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:34Z","lastTransitionTime":"2025-10-02T10:53:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.544782 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6qbg4" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.544805 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.544824 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.544887 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 02 10:53:34 crc kubenswrapper[4783]: E1002 10:53:34.545630 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6qbg4" podUID="f05f5bf0-b0a7-453b-999b-8ef23ca6cc68" Oct 02 10:53:34 crc kubenswrapper[4783]: E1002 10:53:34.545777 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 02 10:53:34 crc kubenswrapper[4783]: E1002 10:53:34.546007 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 02 10:53:34 crc kubenswrapper[4783]: E1002 10:53:34.546107 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.591949 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.592064 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.592134 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.592172 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.592239 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:34Z","lastTransitionTime":"2025-10-02T10:53:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:34 crc kubenswrapper[4783]: E1002 10:53:34.614691 4783 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:34Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:34Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:34Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:34Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:34Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:34Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:34Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:34Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc187a47-fc71-4069-a609-1fd638044aa7\\\",\\\"systemUUID\\\":\\\"b9763463-2b4a-4924-bf4e-8df5af678b9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:34Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.619573 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.619625 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.619643 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.619665 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.619683 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:34Z","lastTransitionTime":"2025-10-02T10:53:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:34 crc kubenswrapper[4783]: E1002 10:53:34.635388 4783 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:34Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:34Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:34Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:34Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:34Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:34Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:34Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:34Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc187a47-fc71-4069-a609-1fd638044aa7\\\",\\\"systemUUID\\\":\\\"b9763463-2b4a-4924-bf4e-8df5af678b9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:34Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.639904 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.639968 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.639992 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.640020 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.640042 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:34Z","lastTransitionTime":"2025-10-02T10:53:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:34 crc kubenswrapper[4783]: E1002 10:53:34.659599 4783 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:34Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:34Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:34Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:34Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:34Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:34Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:34Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:34Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc187a47-fc71-4069-a609-1fd638044aa7\\\",\\\"systemUUID\\\":\\\"b9763463-2b4a-4924-bf4e-8df5af678b9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:34Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.663851 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.663900 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.663918 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.663940 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.663956 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:34Z","lastTransitionTime":"2025-10-02T10:53:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:34 crc kubenswrapper[4783]: E1002 10:53:34.690141 4783 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:34Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:34Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:34Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:34Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:34Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:34Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:34Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:34Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc187a47-fc71-4069-a609-1fd638044aa7\\\",\\\"systemUUID\\\":\\\"b9763463-2b4a-4924-bf4e-8df5af678b9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:34Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.695555 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.695630 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.695656 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.695687 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.695710 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:34Z","lastTransitionTime":"2025-10-02T10:53:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:34 crc kubenswrapper[4783]: E1002 10:53:34.717551 4783 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:34Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:34Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:34Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:34Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:34Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:34Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:34Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:34Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc187a47-fc71-4069-a609-1fd638044aa7\\\",\\\"systemUUID\\\":\\\"b9763463-2b4a-4924-bf4e-8df5af678b9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:34Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:34 crc kubenswrapper[4783]: E1002 10:53:34.717781 4783 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.720301 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.720370 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.720398 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.720459 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.720478 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:34Z","lastTransitionTime":"2025-10-02T10:53:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.823955 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.824025 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.824042 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.824066 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.824085 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:34Z","lastTransitionTime":"2025-10-02T10:53:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.927088 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.927238 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.927264 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.927289 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:34 crc kubenswrapper[4783]: I1002 10:53:34.927306 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:34Z","lastTransitionTime":"2025-10-02T10:53:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.030981 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.031051 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.031070 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.031093 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.031111 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:35Z","lastTransitionTime":"2025-10-02T10:53:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.134089 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.134149 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.134165 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.134187 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.134224 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:35Z","lastTransitionTime":"2025-10-02T10:53:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.237751 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.237810 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.237853 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.237878 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.237895 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:35Z","lastTransitionTime":"2025-10-02T10:53:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.340512 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.340566 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.340587 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.340615 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.340640 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:35Z","lastTransitionTime":"2025-10-02T10:53:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.443969 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.444031 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.444048 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.444073 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.444093 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:35Z","lastTransitionTime":"2025-10-02T10:53:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.485746 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.486952 4783 scope.go:117] "RemoveContainer" containerID="328d776237bc4cad07836e4be5df0419b0b9da5866cdfccbfe91953b920818da" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.512236 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a3fab4917d2aaaba6208c2c5c4dd19b845e8d52e5e350566d8a78cfd581604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:35Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.541094 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jqvp2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"94681624-a0a9-443a-9b4d-715182399740\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b4ea5dd6b4b12458d54c54f155ff89b39e168c234f9e96f04375a0ed60ef09e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jqvp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:35Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.547194 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.547238 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:35 crc 
kubenswrapper[4783]: I1002 10:53:35.547254 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.547280 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.547298 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:35Z","lastTransitionTime":"2025-10-02T10:53:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.566241 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:35Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.599580 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"095cdcdf-1ea0-40da-871a-1223c6737377\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://328d776237bc4cad07836e4be5df0419b0b9da58
66cdfccbfe91953b920818da\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://328d776237bc4cad07836e4be5df0419b0b9da5866cdfccbfe91953b920818da\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"message\\\":\\\"d as: [{Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:c94130be-172c-477c-88c4-40cc7eba30fe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1002 10:53:23.050965 6142 model_client.go:382] Update operations generated as: [{Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.4 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {43933d5e-3c3b-4ff8-8926-04ac25de450e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1002 10:53:23.051500 6142 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:c94130be-172c-477c-88c4-40cc7eba30fe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {eb8eef51-1a8d-43f9-ae2e-3b2cc00ded60}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1002 10:53:23.051014 6142 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1002 10:53:23.051591 6142 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:22Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-qmd84_openshift-ovn-kubernetes(095cdcdf-1ea0-40da-871a-1223c6737377)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qmd84\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:35Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.620465 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aef16d25-482a-4a0f-91e0-b67cdb92c4ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108796d4fc46b7aa41272bcdc4d1ce172e8cf057bde9f21171ff373264b986cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ppwc7
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50f25f7edd0387d10c6125f930f5e278511723be9a92210788ad55b6ec6e956d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ppwc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2q7k8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:35Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.639101 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6qbg4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f05f5bf0-b0a7-453b-999b-8ef23ca6cc68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-scffz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-scffz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:24Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6qbg4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:35Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.650234 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.650295 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.650312 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.650352 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.650373 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:35Z","lastTransitionTime":"2025-10-02T10:53:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.677963 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9d7cf9d-8522-4a62-aaf3-1579864e50f3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b2e414bb5345d3aae7e32d912acf6d7ede59c1f47e1474caf4736663c733d2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8d3ec78a66bfaf15222ed50fe81f042cbf0b9aa5ecc8fb175b0efc8bc1bccbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a03f92fd7d8f5e7193682a5f1db78e947bea4135976b5db6d87842634f0c4d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edada94eb053469810a747a5d1bf8fe2c2da095e5c944dd2cd5ab7846faf5ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebdc8ed0cab8049a9f6220e6069291fd7a0e6c2b97e0442e3221549cbcc889f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:35Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.702211 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2054147b-696c-471a-9602-db747325cae2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d954f0dc3572c180556ccde9a4c429359575e5418fb786fc5df6987f8c326075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e521d7072220c202109a67fe59ebebe0db8bd877d02d9805324922eb3bf45d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb33b77ecaf413bc6e4aeb0f4358b12cd0f3fa6e01602673a518f5651e4bac6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://414652a9b63f678601226313a42b29f5e146de06f854c1f63ed8d408ea9f3553\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-02T10:53:07Z\\\",\\\"message\\\":\\\"file observer\\\\nW1002 10:53:07.776642 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1002 10:53:07.776950 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1002 10:53:07.778492 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-420939118/tls.crt::/tmp/serving-cert-420939118/tls.key\\\\\\\"\\\\nI1002 10:53:07.918100 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1002 10:53:07.922528 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1002 10:53:07.922551 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1002 10:53:07.922578 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1002 10:53:07.922585 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1002 10:53:07.930960 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1002 10:53:07.930984 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1002 10:53:07.930999 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931026 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931031 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1002 10:53:07.931036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1002 10:53:07.931039 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1002 10:53:07.931042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1002 10:53:07.933737 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://85188b6d7b5901c957d4ea576d5baab74589a2300f15d6210915fd78f6171348\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:35Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.728963 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"acf018d3-a71b-450d-88ef-8f991d5d2219\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf7913b5261e05e8bbfb22a81635ff274a8a28d550d004a5349a9ed84ba5c8b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcc4f647d50346cff07c267ebe645576d032b01e443eaabe4b9aa73b91966e4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdd2ca18244e601967c7e10b1f8f1f02fcf33bfee712d78f7715909c95581b9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0e111ce373594ac0f4bf89b06fd2008eb6554e566575b88e4a4f6beaaa29d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:35Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.750906 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fd4a14eb803afc1b1efecd7c9b8560546554be16de62e8db79752bce61a47dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50292888ad1f8cb9b64dcecfa47015e88b7a560123e50d4a3bf1550b87cfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:35Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.753271 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.753351 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.753388 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.753450 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.753471 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:35Z","lastTransitionTime":"2025-10-02T10:53:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.767946 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3288cc82-59a8-408e-8b0e-b5255882b4fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d07b963cb479fc49309f30652bcc2566fd49cd3c7fa53fc79aaf82a9a4b27eab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65d5af548f6048ec95699a1b7175ec8efea96441c335805d704b414eedac00d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2j8rt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:35Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.782022 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vxpvq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0781b010-f65f-4e49-9d78-48eda11666fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://feae8da7cb436d7753a9fce729106db7025bd28da1a5d4fb2e67ac07120daac8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbk2x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:12Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vxpvq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:35Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.795219 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:35Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.808107 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:35Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.855532 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.855567 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.855624 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.855641 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.855651 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:35Z","lastTransitionTime":"2025-10-02T10:53:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.857424 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dec06359fbdd507aff2ffef8bd1114892cd9e4732b0f21139fd60b5765bc074\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:35Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.877434 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wmn4g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6c8d5bc-163f-401f-bdc5-4625112dced9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ec2f190b5004026dfc36e595ca483d08c1dc07796317d28f9dfd252e391fe7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6gs6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wmn4g\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:35Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.886425 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ks7tf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cbc7f6d-232e-484d-9afc-7111e428762c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160ecc09cabb40cb5867a449f2bb1707da2367f5b10cc4f53276b372b2641bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fj8jd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ks7tf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:35Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.891089 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qmd84_095cdcdf-1ea0-40da-871a-1223c6737377/ovnkube-controller/1.log" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.895399 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" event={"ID":"095cdcdf-1ea0-40da-871a-1223c6737377","Type":"ContainerStarted","Data":"9e93788b69b702863377e8cddbd66bfba5faad661fd439898259b9917becf389"} Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.895792 4783 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.908677 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2054147b-696c-471a-9602-db747325cae2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d954f0dc3572c180556ccde9a4c429359575e5418fb786fc5df6987f8c326075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e521d7072220c202109a67fe59ebebe0db8bd877d02d9805324922eb3bf45d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb33b77ecaf413bc6e4aeb0f4358b12cd0f3fa6e01602673a518f5651e4bac6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\
\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://414652a9b63f678601226313a42b29f5e146de06f854c1f63ed8d408ea9f3553\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-02T10:53:07Z\\\",\\\"message\\\":\\\"file observer\\\\nW1002 10:53:07.776642 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1002 10:53:07.776950 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1002 10:53:07.778492 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-420939118/tls.crt::/tmp/serving-cert-420939118/tls.key\\\\\\\"\\\\nI1002 10:53:07.918100 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1002 10:53:07.922528 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1002 10:53:07.922551 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1002 10:53:07.922578 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1002 10:53:07.922585 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1002 10:53:07.930960 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1002 10:53:07.930984 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1002 10:53:07.930999 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931026 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931031 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1002 10:53:07.931036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1002 10:53:07.931039 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1002 10:53:07.931042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1002 10:53:07.933737 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://85188b6d7b5901c957d4ea576d5baab74589a2300f15d6210915fd78f6171348\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:35Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.920882 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"acf018d3-a71b-450d-88ef-8f991d5d2219\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf7913b5261e05e8bbfb22a81635ff274a8a28d550d004a5349a9ed84ba5c8b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcc4f647d50346cff07c267ebe645576d032b01e443eaabe4b9aa73b91966e4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdd2ca18244e601967c7e10b1f8f1f02fcf33bfee712d78f7715909c95581b9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0e111ce373594ac0f4bf89b06fd2008eb6554e566575b88e4a4f6beaaa29d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:35Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.932803 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fd4a14eb803afc1b1efecd7c9b8560546554be16de62e8db79752bce61a47dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50292888ad1f8cb9b64dcecfa47015e88b7a560123e50d4a3bf1550b87cfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:35Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.943724 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3288cc82-59a8-408e-8b0e-b5255882b4fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d07b963cb479fc49309f30652bcc2566fd49cd3c7fa53fc79aaf82a9a4b27eab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65d5af548f6048ec95699a1b7175ec8efea96441c335805d704b414eedac00d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha
256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2j8rt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:35Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.958116 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.958153 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.958164 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.958180 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.958190 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:35Z","lastTransitionTime":"2025-10-02T10:53:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.960843 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vxpvq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0781b010-f65f-4e49-9d78-48eda11666fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://feae8da7cb436d7753a9fce729106db7025bd28da1a5d4fb2e67ac07120daac8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbk2x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:12Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vxpvq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:35Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:35 crc kubenswrapper[4783]: I1002 10:53:35.995018 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9d7cf9d-8522-4a62-aaf3-1579864e50f3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b2e414bb5345d3aae7e32d912acf6d7ede59c1f47e1474caf4736663c733d2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8d3ec78a66bfaf15222ed50fe81f042cbf0b9aa5ecc8fb175b0efc8bc1bccbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a03f92fd7d8f5e7193682a5f1db78e947bea4135976b5db6d87842634f0c4d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edada94eb053469810a747a5d1bf8fe2c2da09
5e5c944dd2cd5ab7846faf5ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebdc8ed0cab8049a9f6220e6069291fd7a0e6c2b97e0442e3221549cbcc889f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:35Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.012216 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:36Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.022886 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:36Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.039797 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dec06359fbdd507aff2ffef8bd1114892cd9e4732b0f21139fd60b5765bc074\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:36Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.054452 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wmn4g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6c8d5bc-163f-401f-bdc5-4625112dced9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ec2f190b5004026dfc36e595ca483d08c1dc07796317d28f9dfd252e391fe7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6gs6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wmn4g\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:36Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.060781 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.060827 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.060840 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.060854 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.060865 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:36Z","lastTransitionTime":"2025-10-02T10:53:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.064225 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ks7tf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cbc7f6d-232e-484d-9afc-7111e428762c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160ecc09cabb40cb5867a449f2bb1707da2367f5b10cc4f53276b372b2641bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fj8jd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.16
8.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ks7tf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:36Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.078569 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jqvp2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94681624-a0a9-443a-9b4d-715182399740\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b4ea5dd6b4b12458d54c54f155ff89b39e168c234f9e96f04375a0ed60ef09e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-bin
ary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\
\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for 
pod \"openshift-multus\"/\"multus-additional-cni-plugins-jqvp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:36Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.090203 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a3fab4917d2aaaba6208c2c5c4dd19b845e8d52e5e350566d8a78cfd581604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:36Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.101999 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:36Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.119346 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"095cdcdf-1ea0-40da-871a-1223c6737377\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e93788b69b702863377e8cddbd66bfba5faad66
1fd439898259b9917becf389\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://328d776237bc4cad07836e4be5df0419b0b9da5866cdfccbfe91953b920818da\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"message\\\":\\\"d as: [{Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:c94130be-172c-477c-88c4-40cc7eba30fe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1002 10:53:23.050965 6142 model_client.go:382] Update operations generated as: [{Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.4 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {43933d5e-3c3b-4ff8-8926-04ac25de450e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1002 10:53:23.051500 6142 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:c94130be-172c-477c-88c4-40cc7eba30fe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {eb8eef51-1a8d-43f9-ae2e-3b2cc00ded60}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1002 10:53:23.051014 6142 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1002 10:53:23.051591 6142 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:22Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\
"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qmd84\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:36Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.130608 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aef16d25-482a-4a0f-91e0-b67cdb92c4ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108796d4fc46b7aa41272bcdc4d1ce172e8cf057bde9f21171ff373264b986cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ppwc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50f25f7edd0387d10c6125f930f5e278511723be9a92210788ad55b6ec6e956d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ppwc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2q7k8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:36Z is after 2025-08-24T17:21:41Z" Oct 02 
10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.142841 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6qbg4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f05f5bf0-b0a7-453b-999b-8ef23ca6cc68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-scffz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-scffz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:24Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6qbg4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:36Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.164202 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.164442 4783 kubelet_node_status.go:724] "Recording event message 
for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.164558 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.164639 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.164709 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:36Z","lastTransitionTime":"2025-10-02T10:53:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.267740 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.267806 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.267826 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.267850 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.267867 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:36Z","lastTransitionTime":"2025-10-02T10:53:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.370153 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.370194 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.370205 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.370221 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.370258 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:36Z","lastTransitionTime":"2025-10-02T10:53:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.472986 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.473031 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.473039 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.473053 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.473063 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:36Z","lastTransitionTime":"2025-10-02T10:53:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.544246 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.544353 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.544445 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:53:36 crc kubenswrapper[4783]: E1002 10:53:36.544458 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.544275 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6qbg4" Oct 02 10:53:36 crc kubenswrapper[4783]: E1002 10:53:36.544568 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 02 10:53:36 crc kubenswrapper[4783]: E1002 10:53:36.544709 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 02 10:53:36 crc kubenswrapper[4783]: E1002 10:53:36.544814 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6qbg4" podUID="f05f5bf0-b0a7-453b-999b-8ef23ca6cc68" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.576465 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.576510 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.576521 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.576541 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.576553 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:36Z","lastTransitionTime":"2025-10-02T10:53:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.679264 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.679335 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.679361 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.679390 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.679448 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:36Z","lastTransitionTime":"2025-10-02T10:53:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.782592 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.782657 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.782675 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.782703 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.782719 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:36Z","lastTransitionTime":"2025-10-02T10:53:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.885884 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.885947 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.885966 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.885992 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.886008 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:36Z","lastTransitionTime":"2025-10-02T10:53:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.910896 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qmd84_095cdcdf-1ea0-40da-871a-1223c6737377/ovnkube-controller/2.log" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.912316 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qmd84_095cdcdf-1ea0-40da-871a-1223c6737377/ovnkube-controller/1.log" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.918780 4783 generic.go:334] "Generic (PLEG): container finished" podID="095cdcdf-1ea0-40da-871a-1223c6737377" containerID="9e93788b69b702863377e8cddbd66bfba5faad661fd439898259b9917becf389" exitCode=1 Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.918861 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" event={"ID":"095cdcdf-1ea0-40da-871a-1223c6737377","Type":"ContainerDied","Data":"9e93788b69b702863377e8cddbd66bfba5faad661fd439898259b9917becf389"} Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.919141 4783 scope.go:117] "RemoveContainer" containerID="328d776237bc4cad07836e4be5df0419b0b9da5866cdfccbfe91953b920818da" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.920922 4783 scope.go:117] "RemoveContainer" containerID="9e93788b69b702863377e8cddbd66bfba5faad661fd439898259b9917becf389" Oct 02 10:53:36 crc kubenswrapper[4783]: E1002 10:53:36.921254 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-qmd84_openshift-ovn-kubernetes(095cdcdf-1ea0-40da-871a-1223c6737377)\"" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.945764 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wmn4g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6c8d5bc-163f-401f-bdc5-4625112dced9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ec2f190b5004026dfc36e595ca483d08c1dc07796317d28f9dfd252e391fe7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6gs6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wmn4g\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:36Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.962153 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ks7tf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cbc7f6d-232e-484d-9afc-7111e428762c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160ecc09cabb40cb5867a449f2bb1707da2367f5b10cc4f53276b372b2641bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fj8jd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ks7tf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:36Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.979206 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:36Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.988466 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.988514 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.988531 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.988557 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:36 crc kubenswrapper[4783]: I1002 10:53:36.988575 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:36Z","lastTransitionTime":"2025-10-02T10:53:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.000373 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:36Z is after 2025-08-24T17:21:41Z"
Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.020675 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dec06359fbdd507aff2ffef8bd1114892cd9e4732b0f21139fd60b5765bc074\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:37Z is after 2025-08-24T17:21:41Z"
Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.045730 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a3fab4917d2aaaba6208c2c5c4dd19b845e8d52e5e350566d8a78cfd581604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:37Z is after 2025-08-24T17:21:41Z"
Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.062953 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jqvp2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94681624-a0a9-443a-9b4d-715182399740\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b4ea5dd6b4b12458d54c54f155ff89b39e168c234f9e96f04375a0ed60ef09e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jqvp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:37Z is after 2025-08-24T17:21:41Z"
Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.080307 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6qbg4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f05f5bf0-b0a7-453b-999b-8ef23ca6cc68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-scffz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-scffz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:24Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6qbg4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:37Z is after 2025-08-24T17:21:41Z"
Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.092586 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.092648 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.092678 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.092706 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.092721 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:37Z","lastTransitionTime":"2025-10-02T10:53:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.098069 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:37Z is after 2025-08-24T17:21:41Z"
Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.117396 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"095cdcdf-1ea0-40da-871a-1223c6737377\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e93788b69b702863377e8cddbd66bfba5faad661fd439898259b9917becf389\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://328d776237bc4cad07836e4be5df0419b0b9da5866cdfccbfe91953b920818da\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"message\\\":\\\"d as: [{Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:c94130be-172c-477c-88c4-40cc7eba30fe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1002 10:53:23.050965 6142 model_client.go:382] Update operations generated as: [{Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.4 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {43933d5e-3c3b-4ff8-8926-04ac25de450e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1002 10:53:23.051500 6142 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:c94130be-172c-477c-88c4-40cc7eba30fe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {eb8eef51-1a8d-43f9-ae2e-3b2cc00ded60}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1002 10:53:23.051014 6142 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1002 10:53:23.051591 6142 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:22Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e93788b69b702863377e8cddbd66bfba5faad661fd439898259b9917becf389\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-02T10:53:36Z\\\",\\\"message\\\":\\\"ed as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:c94130be-172c-477c-88c4-40cc7eba30fe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {eb8eef51-1a8d-43f9-ae2e-3b2cc00ded60}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1002 10:53:36.417880 6343 model_client.go:382] Update operations generated as: [{Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1002 10:53:36.417897 6343 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1002 10:53:36.417918 6343 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:dce28c51-c9f1-478b-97c8-7e209d6e7cbe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1002 10:53:36.417978 6343 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qmd84\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:37Z is after 2025-08-24T17:21:41Z"
Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.128307 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aef16d25-482a-4a0f-91e0-b67cdb92c4ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108796d4fc46b7aa41272bcdc4d1ce172e8cf057bde9f21171ff373264b986cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ppwc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50f25f7edd0387d10c6125f930f5e278511723be9a92210788ad55b6ec6e956d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ppwc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2q7k8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:37Z is after 2025-08-24T17:21:41Z"
Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.139399 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3288cc82-59a8-408e-8b0e-b5255882b4fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d07b963cb479fc49309f30652bcc2566fd49cd3c7fa53fc79aaf82a9a4b27eab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65d5af548f6048ec95699a1b7175ec8efea96441c335805d704b414eedac00d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2j8rt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:37Z is after 2025-08-24T17:21:41Z"
Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.152768 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vxpvq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0781b010-f65f-4e49-9d78-48eda11666fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://feae8da7cb436d7753a9fce729106db7025bd28da1a5d4fb2e67ac07120daac8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbk2x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:12Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vxpvq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:37Z is after 2025-08-24T17:21:41Z"
Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.183177 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9d7cf9d-8522-4a62-aaf3-1579864e50f3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b2e414bb5345d3aae7e32d912acf6d7ede59c1f47e1474caf4736663c733d2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8d3ec78a66bfaf15222ed50fe81f042cbf0b9aa5ecc8fb175b0efc8bc1bccbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a03f92fd7d8f5e7193682a5f1db78e947bea4135976b5db6d87842634f0c4d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edada94eb053469810a747a5d1bf8fe2c2da095e5c944dd2cd5ab7846faf5ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebdc8ed0cab8049a9f6220e6069291fd7a0e6c2b97e0442e3221549cbcc889f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:37Z is after 2025-08-24T17:21:41Z"
Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.194715 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.194758 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.194770 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.194786 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.194798 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:37Z","lastTransitionTime":"2025-10-02T10:53:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.198391 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2054147b-696c-471a-9602-db747325cae2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d954f0dc3572c180556ccde9a4c429359575e5418fb786fc5df6987f8c326075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e521d7072220c202109a67fe59ebebe0db8bd877d02d9805324922eb3bf45d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb33b77ecaf413bc6e4aeb0f4358b12cd0f3fa6e01602673a518f5651e4bac6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartC
ount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://414652a9b63f678601226313a42b29f5e146de06f854c1f63ed8d408ea9f3553\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-02T10:53:07Z\\\",\\\"message\\\":\\\"file observer\\\\nW1002 10:53:07.776642 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1002 10:53:07.776950 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1002 10:53:07.778492 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-420939118/tls.crt::/tmp/serving-cert-420939118/tls.key\\\\\\\"\\\\nI1002 10:53:07.918100 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1002 10:53:07.922528 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1002 10:53:07.922551 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1002 10:53:07.922578 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1002 10:53:07.922585 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1002 10:53:07.930960 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1002 10:53:07.930984 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1002 10:53:07.930999 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931026 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931031 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1002 10:53:07.931036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1002 10:53:07.931039 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1002 10:53:07.931042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1002 10:53:07.933737 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://85188b6d7b5901c957d4ea576d5baab74589a2300f15d6210915fd78f6171348\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:37Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.212127 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"acf018d3-a71b-450d-88ef-8f991d5d2219\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf7913b5261e05e8bbfb22a81635ff274a8a28d550d004a5349a9ed84ba5c8b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcc4f647d50346cff07c267ebe645576d032b01e443eaabe4b9aa73b91966e4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdd2ca18244e601967c7e10b1f8f1f02fcf33bfee712d78f7715909c95581b9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0e111ce373594ac0f4bf89b06fd2008eb6554e566575b88e4a4f6beaaa29d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:37Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.226969 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fd4a14eb803afc1b1efecd7c9b8560546554be16de62e8db79752bce61a47dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50292888ad1f8cb9b64dcecfa47015e88b7a560123e50d4a3bf1550b87cfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:37Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.297445 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.298251 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.298344 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.298493 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.298640 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:37Z","lastTransitionTime":"2025-10-02T10:53:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.401824 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.401869 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.401882 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.401900 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.401912 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:37Z","lastTransitionTime":"2025-10-02T10:53:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.504789 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.504854 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.504880 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.504929 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.504956 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:37Z","lastTransitionTime":"2025-10-02T10:53:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.577511 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9d7cf9d-8522-4a62-aaf3-1579864e50f3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b2e414bb5345d3aae7e32d912acf6d7ede59c1f47e1474caf4736663c733d2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8d3ec78a66bfaf15222ed50fe81f042cbf0b9aa5ecc8fb175b0efc8bc1bccbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@s
ha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a03f92fd7d8f5e7193682a5f1db78e947bea4135976b5db6d87842634f0c4d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edada94eb053469810a747a5d1bf8fe2c2da095e5c944dd2cd5ab7846faf5ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebdc8ed0cab8049a9f6220e6069291fd7a0e6c2b97e0442e3221549cbcc889f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a673
14731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:37Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.597340 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2054147b-696c-471a-9602-db747325cae2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d954f0dc3572c180556ccde9a4c429359575e5418fb786fc5df6987f8c326075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e521d7072220c202109a67fe59ebebe0db8bd877d02d9805324922eb3bf45d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb33b77ecaf413bc6e4aeb0f4358b12cd0f3fa6e01602673a518f5651e4bac6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://414652a9b63f678601226313a42b29f5e146de06f854c1f63ed8d408ea9f3553\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-02T10:53:07Z\\\",\\\"message\\\":\\\"file observer\\\\nW1002 10:53:07.776642 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1002 10:53:07.776950 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1002 10:53:07.778492 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-420939118/tls.crt::/tmp/serving-cert-420939118/tls.key\\\\\\\"\\\\nI1002 10:53:07.918100 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1002 10:53:07.922528 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1002 10:53:07.922551 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1002 10:53:07.922578 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1002 10:53:07.922585 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1002 10:53:07.930960 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1002 10:53:07.930984 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1002 10:53:07.930999 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931026 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931031 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1002 10:53:07.931036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1002 10:53:07.931039 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1002 10:53:07.931042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1002 10:53:07.933737 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://85188b6d7b5901c957d4ea576d5baab74589a2300f15d6210915fd78f6171348\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:37Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.607400 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.607468 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.607482 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.607502 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.607518 4783 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:37Z","lastTransitionTime":"2025-10-02T10:53:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.615333 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"acf018d3-a71b-450d-88ef-8f991d5d2219\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf7913b5261e05e8bbfb22a81635ff274a8a28d550d004a5349a9ed84ba5c8b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcc4f647d50346cff07c267ebe645576d032b01e443eaabe4b9aa73b91966e4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdd2ca18244e601967c7e10b1f8f1f02fcf33bfee712d78f7715909c95581b9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0e111ce373594ac0f4bf89b06fd2008eb6554e566575b88e4a4f6beaaa29d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:37Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.631448 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fd4a14eb803afc1b1efecd7c9b8560546554be16de62e8db79752bce61a47dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50292888ad1f8cb9b64dcecfa47015e88b7a560123e50d4a3bf1550b87cfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:37Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.648777 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3288cc82-59a8-408e-8b0e-b5255882b4fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d07b963cb479fc49309f30652bcc2566fd49cd3c7fa53fc79aaf82a9a4b27eab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65d5af548f6048ec95699a1b7175ec8efea96441c335805d704b414eedac00d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2j8rt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:37Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.669086 4783 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-vxpvq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0781b010-f65f-4e49-9d78-48eda11666fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://feae8da7cb436d7753a9fce729106db7025bd28da1a5d4fb2e67ac07120daac8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbk2x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:12Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vxpvq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:37Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.691092 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:37Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.708986 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:37Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.709749 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.709832 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.709856 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.709887 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.709910 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:37Z","lastTransitionTime":"2025-10-02T10:53:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.721211 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dec06359fbdd507aff2ffef8bd1114892cd9e4732b0f21139fd60b5765bc074\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:37Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.734649 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wmn4g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6c8d5bc-163f-401f-bdc5-4625112dced9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ec2f190b5004026dfc36e595ca483d08c1dc07796317d28f9dfd252e391fe7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6gs6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wmn4g\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:37Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.743829 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ks7tf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cbc7f6d-232e-484d-9afc-7111e428762c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160ecc09cabb40cb5867a449f2bb1707da2367f5b10cc4f53276b372b2641bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fj8jd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ks7tf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:37Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.755194 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a3fab4917d2aaaba6208c2c5c4dd19b845e8d52e5e350566d8a78cfd581604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:37Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.769216 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jqvp2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"94681624-a0a9-443a-9b4d-715182399740\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b4ea5dd6b4b12458d54c54f155ff89b39e168c234f9e96f04375a0ed60ef09e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jqvp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:37Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.780734 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:37Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.799173 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"095cdcdf-1ea0-40da-871a-1223c6737377\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e93788b69b702863377e8cddbd66bfba5faad66
1fd439898259b9917becf389\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://328d776237bc4cad07836e4be5df0419b0b9da5866cdfccbfe91953b920818da\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"message\\\":\\\"d as: [{Op:mutate Table:Logical_Switch Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:c94130be-172c-477c-88c4-40cc7eba30fe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {7e8bb06a-06a5-45bc-a752-26a17d322811}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1002 10:53:23.050965 6142 model_client.go:382] Update operations generated as: [{Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.4 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {43933d5e-3c3b-4ff8-8926-04ac25de450e}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1002 10:53:23.051500 6142 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:c94130be-172c-477c-88c4-40cc7eba30fe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {eb8eef51-1a8d-43f9-ae2e-3b2cc00ded60}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1002 10:53:23.051014 6142 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1002 10:53:23.051591 6142 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:22Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e93788b69b702863377e8cddbd66bfba5faad661fd439898259b9917becf389\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-02T10:53:36Z\\\",\\\"message\\\":\\\"ed as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:c94130be-172c-477c-88c4-40cc7eba30fe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {eb8eef51-1a8d-43f9-ae2e-3b2cc00ded60}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1002 10:53:36.417880 6343 model_client.go:382] Update operations generated as: [{Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1002 10:53:36.417897 6343 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1002 10:53:36.417918 6343 model_client.go:398] Mutate operations generated 
as: [{Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:dce28c51-c9f1-478b-97c8-7e209d6e7cbe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1002 10:53:36.417978 6343 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/e
nv\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qmd84\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:37Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.808679 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aef16d25-482a-4a0f-91e0-b67cdb92c4ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108796d4fc46b7aa41272bcdc4d1ce172e8cf057bde9f21171ff373264b986cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ppwc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50f25f7edd0387d10c6125f930f5e278511723be9a92210788ad55b6ec6e956d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ppwc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2q7k8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:37Z is after 2025-08-24T17:21:41Z" Oct 02 
10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.811922 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.812039 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.812118 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.812196 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.812256 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:37Z","lastTransitionTime":"2025-10-02T10:53:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.819078 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6qbg4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f05f5bf0-b0a7-453b-999b-8ef23ca6cc68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-scffz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-scffz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:24Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6qbg4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:37Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.916007 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.916074 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.916090 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.916114 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.916133 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:37Z","lastTransitionTime":"2025-10-02T10:53:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.924824 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qmd84_095cdcdf-1ea0-40da-871a-1223c6737377/ovnkube-controller/2.log" Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.930720 4783 scope.go:117] "RemoveContainer" containerID="9e93788b69b702863377e8cddbd66bfba5faad661fd439898259b9917becf389" Oct 02 10:53:37 crc kubenswrapper[4783]: E1002 10:53:37.930980 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-qmd84_openshift-ovn-kubernetes(095cdcdf-1ea0-40da-871a-1223c6737377)\"" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.951778 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a3fab4917d2aaaba6208c2c5c4dd19b845e8d52e5e350566d8a78cfd581604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:37Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.977093 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jqvp2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"94681624-a0a9-443a-9b4d-715182399740\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b4ea5dd6b4b12458d54c54f155ff89b39e168c234f9e96f04375a0ed60ef09e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jqvp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:37Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:37 crc kubenswrapper[4783]: I1002 10:53:37.994893 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:37Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.018661 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.018685 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.018694 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.018707 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.018717 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:38Z","lastTransitionTime":"2025-10-02T10:53:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.026074 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"095cdcdf-1ea0-40da-871a-1223c6737377\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e93788b69b702863377e8cddbd66bfba5faad661fd439898259b9917becf389\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e93788b69b702863377e8cddbd66bfba5faad661fd439898259b9917becf389\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-02T10:53:36Z\\\",\\\"message\\\":\\\"ed as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:c94130be-172c-477c-88c4-40cc7eba30fe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {eb8eef51-1a8d-43f9-ae2e-3b2cc00ded60}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1002 10:53:36.417880 6343 model_client.go:382] Update operations generated as: [{Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1002 10:53:36.417897 6343 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1002 10:53:36.417918 6343 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:dce28c51-c9f1-478b-97c8-7e209d6e7cbe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1002 10:53:36.417978 6343 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:35Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-qmd84_openshift-ovn-kubernetes(095cdcdf-1ea0-40da-871a-1223c6737377)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recurs
iveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qmd84\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:38Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.044857 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aef16d25-482a-4a0f-91e0-b67cdb92c4ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108796d4fc46b7aa41272bcdc4d1ce172e8cf057bde9f21171ff373264b986cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ppwc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50f25f7edd0387d10c6125f930f5e278511723be9a92210788ad55b6ec6e956d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ppwc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2q7k8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:38Z is after 2025-08-24T17:21:41Z" Oct 02 
10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.062225 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6qbg4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f05f5bf0-b0a7-453b-999b-8ef23ca6cc68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-scffz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-scffz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:24Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6qbg4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:38Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.096119 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9d7cf9d-8522-4a62-aaf3-1579864e50f3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b2e414bb5345d3aae7e32d912acf6d7ede59c1f47e1474caf4736663c733d2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8d3ec78a66bfaf15222ed50fe81f042cbf0b9aa5ecc8fb175b0efc8bc1bccbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a03f92fd7d8f5e7193682a5f1db78e947bea4135976b5db6d87842634f0c4d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edada94eb053469810a747a5d1bf8fe2c2da09
5e5c944dd2cd5ab7846faf5ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebdc8ed0cab8049a9f6220e6069291fd7a0e6c2b97e0442e3221549cbcc889f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:38Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.117163 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2054147b-696c-471a-9602-db747325cae2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d954f0dc3572c180556ccde9a4c429359575e5418fb786fc5df6987f8c326075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e521d7072220c202109a67fe59ebebe0db8bd877d02d9805324922eb3bf45d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb33b77ecaf413bc6e4aeb0f4358b12cd0f3fa6e01602673a518f5651e4bac6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://414652a9b63f678601226313a42b29f5e146de06f854c1f63ed8d408ea9f3553\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-02T10:53:07Z\\\",\\\"message\\\":\\\"file observer\\\\nW1002 10:53:07.776642 1 builder.go:272] unable to get owner reference (falling 
back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1002 10:53:07.776950 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1002 10:53:07.778492 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-420939118/tls.crt::/tmp/serving-cert-420939118/tls.key\\\\\\\"\\\\nI1002 10:53:07.918100 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1002 10:53:07.922528 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1002 10:53:07.922551 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1002 10:53:07.922578 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1002 10:53:07.922585 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1002 10:53:07.930960 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1002 10:53:07.930984 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1002 10:53:07.930999 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931026 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931031 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1002 10:53:07.931036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1002 10:53:07.931039 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1002 10:53:07.931042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1002 10:53:07.933737 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://85188b6d7b5901c957d4ea576d5baab74589a2300f15d6210915fd78f6171348\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:38Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.121510 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.121573 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.121595 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.121623 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.121642 4783 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:38Z","lastTransitionTime":"2025-10-02T10:53:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.138028 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"acf018d3-a71b-450d-88ef-8f991d5d2219\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf7913b5261e05e8bbfb22a81635ff274a8a28d550d004a5349a9ed84ba5c8b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcc4f647d50346cff07c267ebe645576d032b01e443eaabe4b9aa73b91966e4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdd2ca18244e601967c7e10b1f8f1f02fcf33bfee712d78f7715909c95581b9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastS
tate\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0e111ce373594ac0f4bf89b06fd2008eb6554e566575b88e4a4f6beaaa29d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:38Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.157644 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fd4a14eb803afc1b1efecd7c9b8560546554be16de62e8db79752bce61a47dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50292888ad1f8cb9b64dcecfa47015e88b7a560123e50d4a3bf1550b87cfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:38Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.176520 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3288cc82-59a8-408e-8b0e-b5255882b4fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d07b963cb479fc49309f30652bcc2566fd49cd3c7fa53fc79aaf82a9a4b27eab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65d5af548f6048ec95699a1b7175ec8efea96441c335805d704b414eedac00d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2j8rt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:38Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.192249 4783 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-vxpvq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0781b010-f65f-4e49-9d78-48eda11666fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://feae8da7cb436d7753a9fce729106db7025bd28da1a5d4fb2e67ac07120daac8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbk2x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:12Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vxpvq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:38Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.210572 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:38Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.225244 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.225308 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.225331 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.225361 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.225386 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:38Z","lastTransitionTime":"2025-10-02T10:53:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.230651 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:38Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.249404 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dec06359fbdd507aff2ffef8bd1114892cd9e4732b0f21139fd60b5765bc074\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:38Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.270699 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wmn4g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6c8d5bc-163f-401f-bdc5-4625112dced9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ec2f190b5004026dfc36e595ca483d08c1dc07796317d28f9dfd252e391fe7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6gs6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wmn4g\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:38Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.289635 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ks7tf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cbc7f6d-232e-484d-9afc-7111e428762c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160ecc09cabb40cb5867a449f2bb1707da2367f5b10cc4f53276b372b2641bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fj8jd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ks7tf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:38Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.328880 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.328965 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.328982 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.329005 4783 kubelet_node_status.go:724] "Recording event 
message for node" node="crc" event="NodeNotReady" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.329021 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:38Z","lastTransitionTime":"2025-10-02T10:53:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.431813 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.431875 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.431897 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.431920 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.431939 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:38Z","lastTransitionTime":"2025-10-02T10:53:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.535646 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.535722 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.535740 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.535764 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.535782 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:38Z","lastTransitionTime":"2025-10-02T10:53:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.544878 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6qbg4" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.544938 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.544946 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.544886 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 02 10:53:38 crc kubenswrapper[4783]: E1002 10:53:38.545097 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6qbg4" podUID="f05f5bf0-b0a7-453b-999b-8ef23ca6cc68" Oct 02 10:53:38 crc kubenswrapper[4783]: E1002 10:53:38.545261 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 02 10:53:38 crc kubenswrapper[4783]: E1002 10:53:38.545394 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 02 10:53:38 crc kubenswrapper[4783]: E1002 10:53:38.545505 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.639256 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.639327 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.639348 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.639378 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.639400 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:38Z","lastTransitionTime":"2025-10-02T10:53:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.742380 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.742483 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.742514 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.742542 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.742564 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:38Z","lastTransitionTime":"2025-10-02T10:53:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.845763 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.845827 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.845849 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.845876 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.845898 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:38Z","lastTransitionTime":"2025-10-02T10:53:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.948650 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.948695 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.948711 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.948734 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:38 crc kubenswrapper[4783]: I1002 10:53:38.948751 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:38Z","lastTransitionTime":"2025-10-02T10:53:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:39 crc kubenswrapper[4783]: I1002 10:53:39.050738 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:39 crc kubenswrapper[4783]: I1002 10:53:39.050781 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:39 crc kubenswrapper[4783]: I1002 10:53:39.050797 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:39 crc kubenswrapper[4783]: I1002 10:53:39.050816 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:39 crc kubenswrapper[4783]: I1002 10:53:39.050830 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:39Z","lastTransitionTime":"2025-10-02T10:53:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:39 crc kubenswrapper[4783]: I1002 10:53:39.153661 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:39 crc kubenswrapper[4783]: I1002 10:53:39.153737 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:39 crc kubenswrapper[4783]: I1002 10:53:39.153762 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:39 crc kubenswrapper[4783]: I1002 10:53:39.153793 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:39 crc kubenswrapper[4783]: I1002 10:53:39.153874 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:39Z","lastTransitionTime":"2025-10-02T10:53:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:39 crc kubenswrapper[4783]: I1002 10:53:39.256721 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:39 crc kubenswrapper[4783]: I1002 10:53:39.256822 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:39 crc kubenswrapper[4783]: I1002 10:53:39.256842 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:39 crc kubenswrapper[4783]: I1002 10:53:39.256869 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:39 crc kubenswrapper[4783]: I1002 10:53:39.256892 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:39Z","lastTransitionTime":"2025-10-02T10:53:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:39 crc kubenswrapper[4783]: I1002 10:53:39.359508 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:39 crc kubenswrapper[4783]: I1002 10:53:39.359556 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:39 crc kubenswrapper[4783]: I1002 10:53:39.359569 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:39 crc kubenswrapper[4783]: I1002 10:53:39.359588 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:39 crc kubenswrapper[4783]: I1002 10:53:39.359600 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:39Z","lastTransitionTime":"2025-10-02T10:53:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:39 crc kubenswrapper[4783]: I1002 10:53:39.462053 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:39 crc kubenswrapper[4783]: I1002 10:53:39.462118 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:39 crc kubenswrapper[4783]: I1002 10:53:39.462138 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:39 crc kubenswrapper[4783]: I1002 10:53:39.462163 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:39 crc kubenswrapper[4783]: I1002 10:53:39.462186 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:39Z","lastTransitionTime":"2025-10-02T10:53:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:39 crc kubenswrapper[4783]: I1002 10:53:39.564922 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:39 crc kubenswrapper[4783]: I1002 10:53:39.564992 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:39 crc kubenswrapper[4783]: I1002 10:53:39.565015 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:39 crc kubenswrapper[4783]: I1002 10:53:39.565047 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:39 crc kubenswrapper[4783]: I1002 10:53:39.565066 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:39Z","lastTransitionTime":"2025-10-02T10:53:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:39 crc kubenswrapper[4783]: I1002 10:53:39.667722 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:39 crc kubenswrapper[4783]: I1002 10:53:39.667776 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:39 crc kubenswrapper[4783]: I1002 10:53:39.667789 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:39 crc kubenswrapper[4783]: I1002 10:53:39.667807 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:39 crc kubenswrapper[4783]: I1002 10:53:39.667820 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:39Z","lastTransitionTime":"2025-10-02T10:53:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:39 crc kubenswrapper[4783]: I1002 10:53:39.771400 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:39 crc kubenswrapper[4783]: I1002 10:53:39.771525 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:39 crc kubenswrapper[4783]: I1002 10:53:39.771551 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:39 crc kubenswrapper[4783]: I1002 10:53:39.771581 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:39 crc kubenswrapper[4783]: I1002 10:53:39.771623 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:39Z","lastTransitionTime":"2025-10-02T10:53:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:39 crc kubenswrapper[4783]: I1002 10:53:39.874536 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:39 crc kubenswrapper[4783]: I1002 10:53:39.874620 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:39 crc kubenswrapper[4783]: I1002 10:53:39.874643 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:39 crc kubenswrapper[4783]: I1002 10:53:39.874672 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:39 crc kubenswrapper[4783]: I1002 10:53:39.874693 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:39Z","lastTransitionTime":"2025-10-02T10:53:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:39 crc kubenswrapper[4783]: I1002 10:53:39.978314 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:39 crc kubenswrapper[4783]: I1002 10:53:39.978382 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:39 crc kubenswrapper[4783]: I1002 10:53:39.978405 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:39 crc kubenswrapper[4783]: I1002 10:53:39.978471 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:39 crc kubenswrapper[4783]: I1002 10:53:39.978499 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:39Z","lastTransitionTime":"2025-10-02T10:53:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.081168 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.081213 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.081227 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.081251 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.081266 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:40Z","lastTransitionTime":"2025-10-02T10:53:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.184322 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.184394 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.184449 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.184479 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.184502 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:40Z","lastTransitionTime":"2025-10-02T10:53:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.287243 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.287325 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.287343 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.287370 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.287388 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:40Z","lastTransitionTime":"2025-10-02T10:53:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.300686 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.300793 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.300816 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f05f5bf0-b0a7-453b-999b-8ef23ca6cc68-metrics-certs\") pod \"network-metrics-daemon-6qbg4\" (UID: \"f05f5bf0-b0a7-453b-999b-8ef23ca6cc68\") " pod="openshift-multus/network-metrics-daemon-6qbg4" Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.300857 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:53:40 crc kubenswrapper[4783]: E1002 10:53:40.300953 4783 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Oct 02 10:53:40 crc kubenswrapper[4783]: E1002 10:53:40.300967 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:54:12.300926853 +0000 UTC m=+85.617121164 (durationBeforeRetry 32s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 02 10:53:40 crc kubenswrapper[4783]: E1002 10:53:40.301030 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-02 10:54:12.300997755 +0000 UTC m=+85.617192106 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Oct 02 10:53:40 crc kubenswrapper[4783]: E1002 10:53:40.301045 4783 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Oct 02 10:53:40 crc kubenswrapper[4783]: E1002 10:53:40.301132 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-02 10:54:12.301110098 +0000 UTC m=+85.617304439 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Oct 02 10:53:40 crc kubenswrapper[4783]: E1002 10:53:40.301167 4783 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Oct 02 10:53:40 crc kubenswrapper[4783]: E1002 10:53:40.301277 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f05f5bf0-b0a7-453b-999b-8ef23ca6cc68-metrics-certs podName:f05f5bf0-b0a7-453b-999b-8ef23ca6cc68 nodeName:}" failed. No retries permitted until 2025-10-02 10:53:56.301241241 +0000 UTC m=+69.617435622 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f05f5bf0-b0a7-453b-999b-8ef23ca6cc68-metrics-certs") pod "network-metrics-daemon-6qbg4" (UID: "f05f5bf0-b0a7-453b-999b-8ef23ca6cc68") : object "openshift-multus"/"metrics-daemon-secret" not registered
Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.390848 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.390903 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.390922 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.390948 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.390970 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:40Z","lastTransitionTime":"2025-10-02T10:53:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.402562 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.402657 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 02 10:53:40 crc kubenswrapper[4783]: E1002 10:53:40.402833 4783 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Oct 02 10:53:40 crc kubenswrapper[4783]: E1002 10:53:40.402869 4783 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Oct 02 10:53:40 crc kubenswrapper[4783]: E1002 10:53:40.402888 4783 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Oct 02 10:53:40 crc kubenswrapper[4783]: E1002 10:53:40.402974 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-10-02 10:54:12.402948137 +0000 UTC m=+85.719142468 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Oct 02 10:53:40 crc kubenswrapper[4783]: E1002 10:53:40.402985 4783 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Oct 02 10:53:40 crc kubenswrapper[4783]: E1002 10:53:40.403067 4783 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Oct 02 10:53:40 crc kubenswrapper[4783]: E1002 10:53:40.403088 4783 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Oct 02 10:53:40 crc kubenswrapper[4783]: E1002 10:53:40.403297 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-10-02 10:54:12.403237154 +0000 UTC m=+85.719431455 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.494238 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.494288 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.494304 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.494330 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.494346 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:40Z","lastTransitionTime":"2025-10-02T10:53:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.544292 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.544346 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 02 10:53:40 crc kubenswrapper[4783]: E1002 10:53:40.544403 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 02 10:53:40 crc kubenswrapper[4783]: E1002 10:53:40.544556 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.544643 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.544655 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6qbg4"
Oct 02 10:53:40 crc kubenswrapper[4783]: E1002 10:53:40.544834 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 02 10:53:40 crc kubenswrapper[4783]: E1002 10:53:40.544854 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6qbg4" podUID="f05f5bf0-b0a7-453b-999b-8ef23ca6cc68"
Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.597208 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.597265 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.597285 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.597309 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.597328 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:40Z","lastTransitionTime":"2025-10-02T10:53:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.699746 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.699784 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.699803 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.699826 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.699844 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:40Z","lastTransitionTime":"2025-10-02T10:53:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.803051 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.803121 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.803138 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.803160 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.803176 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:40Z","lastTransitionTime":"2025-10-02T10:53:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.907908 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.907967 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.907997 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.908026 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:40 crc kubenswrapper[4783]: I1002 10:53:40.908048 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:40Z","lastTransitionTime":"2025-10-02T10:53:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:41 crc kubenswrapper[4783]: I1002 10:53:41.011158 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:41 crc kubenswrapper[4783]: I1002 10:53:41.011215 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:41 crc kubenswrapper[4783]: I1002 10:53:41.011232 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:41 crc kubenswrapper[4783]: I1002 10:53:41.011254 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:41 crc kubenswrapper[4783]: I1002 10:53:41.011271 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:41Z","lastTransitionTime":"2025-10-02T10:53:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:41 crc kubenswrapper[4783]: I1002 10:53:41.113830 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:41 crc kubenswrapper[4783]: I1002 10:53:41.113885 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:41 crc kubenswrapper[4783]: I1002 10:53:41.113902 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:41 crc kubenswrapper[4783]: I1002 10:53:41.113926 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:41 crc kubenswrapper[4783]: I1002 10:53:41.113945 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:41Z","lastTransitionTime":"2025-10-02T10:53:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:41 crc kubenswrapper[4783]: I1002 10:53:41.217380 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:41 crc kubenswrapper[4783]: I1002 10:53:41.217497 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:41 crc kubenswrapper[4783]: I1002 10:53:41.217523 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:41 crc kubenswrapper[4783]: I1002 10:53:41.217557 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:41 crc kubenswrapper[4783]: I1002 10:53:41.217581 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:41Z","lastTransitionTime":"2025-10-02T10:53:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:41 crc kubenswrapper[4783]: I1002 10:53:41.320893 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:41 crc kubenswrapper[4783]: I1002 10:53:41.320951 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:41 crc kubenswrapper[4783]: I1002 10:53:41.320972 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:41 crc kubenswrapper[4783]: I1002 10:53:41.320996 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:41 crc kubenswrapper[4783]: I1002 10:53:41.321013 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:41Z","lastTransitionTime":"2025-10-02T10:53:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:41 crc kubenswrapper[4783]: I1002 10:53:41.425022 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:41 crc kubenswrapper[4783]: I1002 10:53:41.425096 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:41 crc kubenswrapper[4783]: I1002 10:53:41.425118 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:41 crc kubenswrapper[4783]: I1002 10:53:41.425148 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:41 crc kubenswrapper[4783]: I1002 10:53:41.425171 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:41Z","lastTransitionTime":"2025-10-02T10:53:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:41 crc kubenswrapper[4783]: I1002 10:53:41.528034 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:41 crc kubenswrapper[4783]: I1002 10:53:41.528071 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:41 crc kubenswrapper[4783]: I1002 10:53:41.528082 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:41 crc kubenswrapper[4783]: I1002 10:53:41.528099 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:41 crc kubenswrapper[4783]: I1002 10:53:41.528110 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:41Z","lastTransitionTime":"2025-10-02T10:53:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:41 crc kubenswrapper[4783]: I1002 10:53:41.630328 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:41 crc kubenswrapper[4783]: I1002 10:53:41.630373 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:41 crc kubenswrapper[4783]: I1002 10:53:41.630385 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:41 crc kubenswrapper[4783]: I1002 10:53:41.630401 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:41 crc kubenswrapper[4783]: I1002 10:53:41.630428 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:41Z","lastTransitionTime":"2025-10-02T10:53:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:41 crc kubenswrapper[4783]: I1002 10:53:41.732173 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:41 crc kubenswrapper[4783]: I1002 10:53:41.732239 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:41 crc kubenswrapper[4783]: I1002 10:53:41.732251 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:41 crc kubenswrapper[4783]: I1002 10:53:41.732289 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:41 crc kubenswrapper[4783]: I1002 10:53:41.732303 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:41Z","lastTransitionTime":"2025-10-02T10:53:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:41 crc kubenswrapper[4783]: I1002 10:53:41.835296 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:41 crc kubenswrapper[4783]: I1002 10:53:41.835352 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:41 crc kubenswrapper[4783]: I1002 10:53:41.835370 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:41 crc kubenswrapper[4783]: I1002 10:53:41.835391 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:41 crc kubenswrapper[4783]: I1002 10:53:41.835446 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:41Z","lastTransitionTime":"2025-10-02T10:53:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:41 crc kubenswrapper[4783]: I1002 10:53:41.937327 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:41 crc kubenswrapper[4783]: I1002 10:53:41.937354 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:41 crc kubenswrapper[4783]: I1002 10:53:41.937362 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:41 crc kubenswrapper[4783]: I1002 10:53:41.937374 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:41 crc kubenswrapper[4783]: I1002 10:53:41.937383 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:41Z","lastTransitionTime":"2025-10-02T10:53:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.039522 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.039913 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.040008 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.040099 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.040177 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:42Z","lastTransitionTime":"2025-10-02T10:53:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.143474 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.143570 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.143588 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.143612 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.143630 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:42Z","lastTransitionTime":"2025-10-02T10:53:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.246649 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.246723 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.246745 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.246777 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.246799 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:42Z","lastTransitionTime":"2025-10-02T10:53:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.350112 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.350470 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.350685 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.350822 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.350958 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:42Z","lastTransitionTime":"2025-10-02T10:53:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.453957 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.454016 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.454033 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.454060 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.454077 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:42Z","lastTransitionTime":"2025-10-02T10:53:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.544259 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 02 10:53:42 crc kubenswrapper[4783]: E1002 10:53:42.544462 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.544705 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.544789 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 02 10:53:42 crc kubenswrapper[4783]: E1002 10:53:42.544904 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.544737 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6qbg4"
Oct 02 10:53:42 crc kubenswrapper[4783]: E1002 10:53:42.545059 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 02 10:53:42 crc kubenswrapper[4783]: E1002 10:53:42.545192 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6qbg4" podUID="f05f5bf0-b0a7-453b-999b-8ef23ca6cc68"
Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.556930 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.556978 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.556993 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.557014 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.557031 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:42Z","lastTransitionTime":"2025-10-02T10:53:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.616159 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc"
Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.627797 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"]
Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.631272 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3288cc82-59a8-408e-8b0e-b5255882b4fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d07b963cb479fc49309f30652bcc2566fd49cd3c7fa53fc79aaf82a9a4b27eab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65d5af548f6048ec95699a1b7175ec8efea96441c335805d704b414eedac00d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2j8rt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:42Z is after 2025-08-24T17:21:41Z"
Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.644455 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vxpvq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0781b010-f65f-4e49-9d78-48eda11666fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://feae8da7cb436d7753a9fce729106db7025bd28da1a5d4fb2e67ac07120daac8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbk2x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:12Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vxpvq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:42Z is after 2025-08-24T17:21:41Z"
Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.659699 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.659777 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.659800 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.659832 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.659854 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:42Z","lastTransitionTime":"2025-10-02T10:53:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.673651 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9d7cf9d-8522-4a62-aaf3-1579864e50f3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b2e414bb5345d3aae7e32d912acf6d7ede59c1f47e1474caf4736663c733d2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8d3ec78a66bfaf15222ed50fe81f042cbf0b9aa5ecc8fb175b0efc8bc1bccbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a03f92fd7d8f5e7193682a5f1db78e947bea4135976b5db6d87842634f0c4d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edada94eb053469810a747a5d1bf8fe2c2da095e5c944dd2cd5ab7846faf5ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebdc8ed0cab8049a9f6220e6069291fd7a0e6c2b97e0442e3221549cbcc889f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:42Z is after 2025-08-24T17:21:41Z"
Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.688617 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2054147b-696c-471a-9602-db747325cae2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d954f0dc3572c180556ccde9a4c429359575e5418fb786fc5df6987f8c326075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e521d7072220c202109a67fe59ebebe0db8bd877d02d9805324922eb3bf45d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb33b77ecaf413bc6e4aeb0f4358b12cd0f3fa6e01602673a518f5651e4bac6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://414652a9b63f678601226313a42b29f5e146de06f854c1f63ed8d408ea9f3553\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-02T10:53:07Z\\\",\\\"message\\\":\\\"file observer\\\\nW1002 10:53:07.776642 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1002 10:53:07.776950 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1002 10:53:07.778492 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-420939118/tls.crt::/tmp/serving-cert-420939118/tls.key\\\\\\\"\\\\nI1002 10:53:07.918100 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1002 10:53:07.922528 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1002 10:53:07.922551 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1002 10:53:07.922578 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1002 10:53:07.922585 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1002 10:53:07.930960 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1002 10:53:07.930984 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1002 10:53:07.930999 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931026 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931031 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1002 10:53:07.931036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1002 10:53:07.931039 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1002 10:53:07.931042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1002 10:53:07.933737 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://85188b6d7b5901c957d4ea576d5baab74589a2300f15d6210915fd78f6171348\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:42Z is after 2025-08-24T17:21:41Z"
Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.702606 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"acf018d3-a71b-450d-88ef-8f991d5d2219\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf7913b5261e05e8bbfb22a81635ff274a8a28d550d004a5349a9ed84ba5c8b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcc4f647d50346cff07c267ebe645576d032b01e443eaabe4b9aa73b91966e4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdd2ca18244e601967c7e10b1f8f1f02fcf33bfee712d78f7715909c95581b9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0e111ce373594ac0f4bf89b06fd2008eb6554e566575b88e4a4f6beaaa29d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:42Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.716014 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fd4a14eb803afc1b1efecd7c9b8560546554be16de62e8db79752bce61a47dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50292888ad1f8cb9b64dcecfa47015e88b7a560123e50d4a3bf1550b87cfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:42Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.728628 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wmn4g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6c8d5bc-163f-401f-bdc5-4625112dced9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ec2f190b5004026dfc36e595ca483d08c1dc07796317d28f9dfd252e391fe7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/c
ni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6gs6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wmn4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:42Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.736779 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ks7tf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cbc7f6d-232e-484d-9afc-7111e428762c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160ecc09cabb40cb5867a449f2bb1707da2367f5b10cc4f53276b372b2641bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fj8jd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]
}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ks7tf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:42Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.748253 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:42Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.759533 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:42Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.762119 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.762151 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.762160 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.762173 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.762182 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:42Z","lastTransitionTime":"2025-10-02T10:53:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.770888 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dec06359fbdd507aff2ffef8bd1114892cd9e4732b0f21139fd60b5765bc074\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:42Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.784534 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a3fab4917d2aaaba6208c2c5c4dd19b845e8d52e5e350566d8a78cfd581604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:42Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.803995 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jqvp2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"94681624-a0a9-443a-9b4d-715182399740\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b4ea5dd6b4b12458d54c54f155ff89b39e168c234f9e96f04375a0ed60ef09e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jqvp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:42Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.818168 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6qbg4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f05f5bf0-b0a7-453b-999b-8ef23ca6cc68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-scffz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-scffz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:24Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6qbg4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:42Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.835054 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:42Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.865228 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.865299 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.865321 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.865351 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.865374 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:42Z","lastTransitionTime":"2025-10-02T10:53:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.865453 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"095cdcdf-1ea0-40da-871a-1223c6737377\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e93788b69b702863377e8cddbd66bfba5faad661fd439898259b9917becf389\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e93788b69b702863377e8cddbd66bfba5faad661fd439898259b9917becf389\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-02T10:53:36Z\\\",\\\"message\\\":\\\"ed as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:c94130be-172c-477c-88c4-40cc7eba30fe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {eb8eef51-1a8d-43f9-ae2e-3b2cc00ded60}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1002 10:53:36.417880 6343 model_client.go:382] Update operations generated as: [{Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1002 10:53:36.417897 6343 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1002 10:53:36.417918 6343 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:dce28c51-c9f1-478b-97c8-7e209d6e7cbe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1002 10:53:36.417978 6343 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:35Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-qmd84_openshift-ovn-kubernetes(095cdcdf-1ea0-40da-871a-1223c6737377)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recurs
iveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qmd84\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:42Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.879007 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aef16d25-482a-4a0f-91e0-b67cdb92c4ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108796d4fc46b7aa41272bcdc4d1ce172e8cf057bde9f21171ff373264b986cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ppwc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50f25f7edd0387d10c6125f930f5e278511723be9a92210788ad55b6ec6e956d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ppwc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2q7k8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:42Z is after 2025-08-24T17:21:41Z" Oct 02 
10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.879712 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.893227 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"39fe3cc2-3136-44ab-8039-15da972eedad\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c3a7b7bb194e7cc66b1765fc3dc2f4d9c6aafa562cb9fcde19f862fda5e34282\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5dc6c37e90ca8574d11db2bcc1137a63af565614eccef178d05b863c6a839b07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://082040f0d8c15812040aa0d45d22f327c303d526e72d35524470348c34b4e74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kuber
netes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1921ea86697ee9de43cd3e9b3b6673ad627b9997cd9f4738e8bd86428e5ef02b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1921ea86697ee9de43cd3e9b3b6673ad627b9997cd9f4738e8bd86428e5ef02b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:42Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.911166 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:42Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.928246 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:42Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.949627 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dec06359fbdd507aff2ffef8bd1114892cd9e4732b0f21139fd60b5765bc074\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:42Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.967981 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:42 
crc kubenswrapper[4783]: I1002 10:53:42.968021 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.968032 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.968048 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.968059 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:42Z","lastTransitionTime":"2025-10-02T10:53:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.970888 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wmn4g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6c8d5bc-163f-401f-bdc5-4625112dced9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ec2f190b5004026dfc36e595ca483d08c1dc07796317d28f9dfd252e391fe7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\"
,\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6gs6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wmn4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:42Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.983829 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ks7tf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cbc7f6d-232e-484d-9afc-7111e428762c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160ecc09cabb40cb5867a449f2bb1707da2367f5b10cc4f53276b372b2641bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fj8jd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"p
hase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ks7tf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:42Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:42 crc kubenswrapper[4783]: I1002 10:53:42.999587 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a3fab4917d2aaaba6208c2c5c4dd19b845e8d52e5e350566d8a78cfd581604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:42Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.022273 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jqvp2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"94681624-a0a9-443a-9b4d-715182399740\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b4ea5dd6b4b12458d54c54f155ff89b39e168c234f9e96f04375a0ed60ef09e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jqvp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:43Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.038014 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:43Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.065033 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"095cdcdf-1ea0-40da-871a-1223c6737377\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e93788b69b702863377e8cddbd66bfba5faad66
1fd439898259b9917becf389\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e93788b69b702863377e8cddbd66bfba5faad661fd439898259b9917becf389\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-02T10:53:36Z\\\",\\\"message\\\":\\\"ed as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:c94130be-172c-477c-88c4-40cc7eba30fe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {eb8eef51-1a8d-43f9-ae2e-3b2cc00ded60}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1002 10:53:36.417880 6343 model_client.go:382] Update operations generated as: [{Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1002 10:53:36.417897 6343 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1002 10:53:36.417918 6343 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:dce28c51-c9f1-478b-97c8-7e209d6e7cbe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1002 10:53:36.417978 6343 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:35Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-qmd84_openshift-ovn-kubernetes(095cdcdf-1ea0-40da-871a-1223c6737377)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qmd84\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:43Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.069839 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.069886 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.069899 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.069919 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.069934 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:43Z","lastTransitionTime":"2025-10-02T10:53:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.082517 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aef16d25-482a-4a0f-91e0-b67cdb92c4ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108796d4fc46b7aa41272bcdc4d1ce172e8cf057bde9f21171ff373264b986cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ppwc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50f25f7edd0387d10c6125f930f5e278511723be9a92210788ad55b6ec6e956d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ppwc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2q7k8\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:43Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.095593 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6qbg4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f05f5bf0-b0a7-453b-999b-8ef23ca6cc68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-scffz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-scffz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:24Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6qbg4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:43Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:43 crc 
kubenswrapper[4783]: I1002 10:53:43.118784 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9d7cf9d-8522-4a62-aaf3-1579864e50f3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b2e414bb5345d3aae7e32d912acf6d7ede59c1f47e1474caf4736663c733d2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8d3ec78a66bfaf15222ed50fe81f042cbf0b9aa5ecc8fb175b0efc8bc1bccbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a03f92fd7d8f5e7193682a5f1db78e947bea4135976b5db6d87842634f0c4d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\
\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edada94eb053469810a747a5d1bf8fe2c2da095e5c944dd2cd5ab7846faf5ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebdc8ed0cab8049a9f6220e6069291fd7a0e6c2b97e0442e3221549cbcc889f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Co
mpleted\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:43Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.138448 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2054147b-696c-471a-9602-db747325cae2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d954f0dc3572c180556ccde9a4c429359575e5418fb786fc5df6987f8c326075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e521d7072220c202109a67fe59ebebe0db8bd877d02d9805324922eb3bf45d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb33b77ecaf413bc6e4aeb0f4358b12cd0f3fa6e01602673a518f5651e4bac6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://414652a9b63f678601226313a42b29f5e146de06f854c1f63ed8d408ea9f3553\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-02T10:53:07Z\\\",\\\"message\\\":\\\"file observer\\\\nW1002 10:53:07.776642 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1002 10:53:07.776950 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1002 10:53:07.778492 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-420939118/tls.crt::/tmp/serving-cert-420939118/tls.key\\\\\\\"\\\\nI1002 10:53:07.918100 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1002 10:53:07.922528 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1002 10:53:07.922551 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1002 10:53:07.922578 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1002 10:53:07.922585 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1002 10:53:07.930960 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1002 10:53:07.930984 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1002 10:53:07.930999 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931026 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931031 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1002 10:53:07.931036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1002 10:53:07.931039 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1002 10:53:07.931042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1002 10:53:07.933737 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://85188b6d7b5901c957d4ea576d5baab74589a2300f15d6210915fd78f6171348\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:43Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.154586 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"acf018d3-a71b-450d-88ef-8f991d5d2219\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf7913b5261e05e8bbfb22a81635ff274a8a28d550d004a5349a9ed84ba5c8b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcc4f647d50346cff07c267ebe645576d032b01e443eaabe4b9aa73b91966e4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdd2ca18244e601967c7e10b1f8f1f02fcf33bfee712d78f7715909c95581b9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0e111ce373594ac0f4bf89b06fd2008eb6554e566575b88e4a4f6beaaa29d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:43Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.171321 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fd4a14eb803afc1b1efecd7c9b8560546554be16de62e8db79752bce61a47dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50292888ad1f8cb9b64dcecfa47015e88b7a560123e50d4a3bf1550b87cfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:43Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.171660 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.171712 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.171729 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.171752 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.171772 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:43Z","lastTransitionTime":"2025-10-02T10:53:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.181899 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3288cc82-59a8-408e-8b0e-b5255882b4fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d07b963cb479fc49309f30652bcc2566fd49cd3c7fa53fc79aaf82a9a4b27eab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65d5af548f6048ec95699a1b7175ec8efea96441c335805d704b414eedac00d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2j8rt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:43Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.194496 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vxpvq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0781b010-f65f-4e49-9d78-48eda11666fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://feae8da7cb436d7753a9fce729106db7025bd28da1a5d4fb2e67ac07120daac8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbk2x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:12Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vxpvq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:43Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.274277 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.274326 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.274342 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.274364 4783 kubelet_node_status.go:724] "Recording 
event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.274380 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:43Z","lastTransitionTime":"2025-10-02T10:53:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.377264 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.377316 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.377331 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.377351 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.377366 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:43Z","lastTransitionTime":"2025-10-02T10:53:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.480393 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.480450 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.480464 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.480480 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.480491 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:43Z","lastTransitionTime":"2025-10-02T10:53:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.583036 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.583081 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.583098 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.583120 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.583137 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:43Z","lastTransitionTime":"2025-10-02T10:53:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.686684 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.687040 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.687195 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.687340 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.687520 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:43Z","lastTransitionTime":"2025-10-02T10:53:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.791042 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.791108 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.791127 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.791155 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.791173 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:43Z","lastTransitionTime":"2025-10-02T10:53:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.894457 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.894525 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.894543 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.894569 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.894587 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:43Z","lastTransitionTime":"2025-10-02T10:53:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.997976 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.998038 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.998060 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.998084 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:43 crc kubenswrapper[4783]: I1002 10:53:43.998101 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:43Z","lastTransitionTime":"2025-10-02T10:53:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:44 crc kubenswrapper[4783]: I1002 10:53:44.100735 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:44 crc kubenswrapper[4783]: I1002 10:53:44.100790 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:44 crc kubenswrapper[4783]: I1002 10:53:44.100807 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:44 crc kubenswrapper[4783]: I1002 10:53:44.100830 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:44 crc kubenswrapper[4783]: I1002 10:53:44.100847 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:44Z","lastTransitionTime":"2025-10-02T10:53:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:44 crc kubenswrapper[4783]: I1002 10:53:44.204056 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:44 crc kubenswrapper[4783]: I1002 10:53:44.204105 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:44 crc kubenswrapper[4783]: I1002 10:53:44.204124 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:44 crc kubenswrapper[4783]: I1002 10:53:44.204147 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:44 crc kubenswrapper[4783]: I1002 10:53:44.204163 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:44Z","lastTransitionTime":"2025-10-02T10:53:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:44 crc kubenswrapper[4783]: I1002 10:53:44.306354 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:44 crc kubenswrapper[4783]: I1002 10:53:44.306460 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:44 crc kubenswrapper[4783]: I1002 10:53:44.306486 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:44 crc kubenswrapper[4783]: I1002 10:53:44.306517 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:44 crc kubenswrapper[4783]: I1002 10:53:44.306542 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:44Z","lastTransitionTime":"2025-10-02T10:53:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:44 crc kubenswrapper[4783]: I1002 10:53:44.409606 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:44 crc kubenswrapper[4783]: I1002 10:53:44.409681 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:44 crc kubenswrapper[4783]: I1002 10:53:44.409704 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:44 crc kubenswrapper[4783]: I1002 10:53:44.409753 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:44 crc kubenswrapper[4783]: I1002 10:53:44.409773 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:44Z","lastTransitionTime":"2025-10-02T10:53:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Oct 02 10:53:44 crc kubenswrapper[4783]: I1002 10:53:44.543863 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 02 10:53:44 crc kubenswrapper[4783]: I1002 10:53:44.543963 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 02 10:53:44 crc kubenswrapper[4783]: E1002 10:53:44.544041 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 02 10:53:44 crc kubenswrapper[4783]: I1002 10:53:44.543888 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 02 10:53:44 crc kubenswrapper[4783]: E1002 10:53:44.544152 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 02 10:53:44 crc kubenswrapper[4783]: I1002 10:53:44.544248 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6qbg4"
Oct 02 10:53:44 crc kubenswrapper[4783]: E1002 10:53:44.544494 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 02 10:53:44 crc kubenswrapper[4783]: E1002 10:53:44.544618 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6qbg4" podUID="f05f5bf0-b0a7-453b-999b-8ef23ca6cc68"
Oct 02 10:53:45 crc kubenswrapper[4783]: E1002 10:53:45.014798 4783 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:44Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:44Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc187a47-fc71-4069-a609-1fd638044aa7\\\",\\\"systemUUID\\\":\\\"b9763463-2b4a-4924-bf4e-8df5af678b9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:45Z is after 2025-08-24T17:21:41Z"
event="NodeHasNoDiskPressure" Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.046919 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.046937 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.046952 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:45Z","lastTransitionTime":"2025-10-02T10:53:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:45 crc kubenswrapper[4783]: E1002 10:53:45.060382 4783 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc187a47-fc71-4069-a609-1fd638044aa7\\\",\\\"systemUUID\\\":\\\"b9763463-2b4a-4924-bf4e-8df5af678b9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:45Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.063909 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.063964 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.064004 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.064024 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.064038 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:45Z","lastTransitionTime":"2025-10-02T10:53:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:45 crc kubenswrapper[4783]: E1002 10:53:45.078251 4783 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc187a47-fc71-4069-a609-1fd638044aa7\\\",\\\"systemUUID\\\":\\\"b9763463-2b4a-4924-bf4e-8df5af678b9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:45Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.082405 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.082502 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.082520 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.082540 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.082553 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:45Z","lastTransitionTime":"2025-10-02T10:53:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:45 crc kubenswrapper[4783]: E1002 10:53:45.095843 4783 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc187a47-fc71-4069-a609-1fd638044aa7\\\",\\\"systemUUID\\\":\\\"b9763463-2b4a-4924-bf4e-8df5af678b9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:45Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:45 crc kubenswrapper[4783]: E1002 10:53:45.096219 4783 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.098022 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.098070 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.098087 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.098105 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.098120 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:45Z","lastTransitionTime":"2025-10-02T10:53:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.201483 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.201533 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.201551 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.201577 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.201595 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:45Z","lastTransitionTime":"2025-10-02T10:53:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.305507 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.305576 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.305596 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.305623 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.305647 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:45Z","lastTransitionTime":"2025-10-02T10:53:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.417315 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.417391 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.417471 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.417509 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.417534 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:45Z","lastTransitionTime":"2025-10-02T10:53:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.521345 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.521391 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.521403 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.521438 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.521454 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:45Z","lastTransitionTime":"2025-10-02T10:53:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.624910 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.624950 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.624961 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.624978 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.624990 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:45Z","lastTransitionTime":"2025-10-02T10:53:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.728159 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.728218 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.728232 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.728248 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.728258 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:45Z","lastTransitionTime":"2025-10-02T10:53:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.831647 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.831725 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.831746 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.831772 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.831789 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:45Z","lastTransitionTime":"2025-10-02T10:53:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.934672 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.934719 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.934750 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.934770 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:45 crc kubenswrapper[4783]: I1002 10:53:45.934781 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:45Z","lastTransitionTime":"2025-10-02T10:53:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.038044 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.038126 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.038145 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.038170 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.038191 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:46Z","lastTransitionTime":"2025-10-02T10:53:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.141535 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.141595 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.141613 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.141636 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.141657 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:46Z","lastTransitionTime":"2025-10-02T10:53:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.244884 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.244966 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.244990 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.245019 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.245047 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:46Z","lastTransitionTime":"2025-10-02T10:53:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.348301 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.348345 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.348354 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.348367 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.348376 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:46Z","lastTransitionTime":"2025-10-02T10:53:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.451315 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.451381 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.451400 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.451461 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.451493 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:46Z","lastTransitionTime":"2025-10-02T10:53:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.544672 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6qbg4" Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.544736 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.544816 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.544685 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 02 10:53:46 crc kubenswrapper[4783]: E1002 10:53:46.544949 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6qbg4" podUID="f05f5bf0-b0a7-453b-999b-8ef23ca6cc68" Oct 02 10:53:46 crc kubenswrapper[4783]: E1002 10:53:46.545082 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 02 10:53:46 crc kubenswrapper[4783]: E1002 10:53:46.545207 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 02 10:53:46 crc kubenswrapper[4783]: E1002 10:53:46.545326 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.554141 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.554195 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.554212 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.554233 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.554250 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:46Z","lastTransitionTime":"2025-10-02T10:53:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.657899 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.658000 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.658023 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.658047 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.658065 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:46Z","lastTransitionTime":"2025-10-02T10:53:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.761573 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.761661 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.761685 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.761713 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.761735 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:46Z","lastTransitionTime":"2025-10-02T10:53:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.864329 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.864396 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.864474 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.864510 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.864532 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:46Z","lastTransitionTime":"2025-10-02T10:53:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.968185 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.968245 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.968262 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.968287 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:46 crc kubenswrapper[4783]: I1002 10:53:46.968305 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:46Z","lastTransitionTime":"2025-10-02T10:53:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.071745 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.071791 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.071808 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.071833 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.071850 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:47Z","lastTransitionTime":"2025-10-02T10:53:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.175002 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.175074 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.175099 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.175130 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.175150 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:47Z","lastTransitionTime":"2025-10-02T10:53:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.278578 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.278668 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.278693 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.278727 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.278752 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:47Z","lastTransitionTime":"2025-10-02T10:53:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.382146 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.382232 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.382249 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.382274 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.382293 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:47Z","lastTransitionTime":"2025-10-02T10:53:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.484729 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.484776 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.484789 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.484805 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.484814 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:47Z","lastTransitionTime":"2025-10-02T10:53:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.560309 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ks7tf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cbc7f6d-232e-484d-9afc-7111e428762c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160ecc09cabb40cb5867a449f2bb1707da2367f5b10cc4f53276b372b2641bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fj8jd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ks7tf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:47Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.573273 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"39fe3cc2-3136-44ab-8039-15da972eedad\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c3a7b7bb194e7cc66b1765fc3dc2f4d9c6aafa562cb9fcde19f862fda5e34282\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5dc6c37e90ca8574d11db2bcc1137a63af565614eccef178d05b863c6a839b07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://082040f0d8c15812040aa0d45d22f327c303d526e72d35524470348c34b4e74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1921ea86697ee9de43cd3e9b3b6673ad627b9997cd9f4738e8bd86428e5ef02b\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1921ea86697ee9de43cd3e9b3b6673ad627b9997cd9f4738e8bd86428e5ef02b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:47Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.586224 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:47Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.588158 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.588790 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.588856 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.588891 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.588947 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:47Z","lastTransitionTime":"2025-10-02T10:53:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.600102 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:47Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.612072 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dec06359fbdd507aff2ffef8bd1114892cd9e4732b0f21139fd60b5765bc074\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:47Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.624705 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wmn4g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6c8d5bc-163f-401f-bdc5-4625112dced9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ec2f190b5004026dfc36e595ca483d08c1dc07796317d28f9dfd252e391fe7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6gs6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wmn4g\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:47Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.639034 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a3fab4917d2aaaba6208c2c5c4dd19b845e8d52e5e350566d8a78cfd581604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:47Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.657784 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jqvp2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"94681624-a0a9-443a-9b4d-715182399740\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b4ea5dd6b4b12458d54c54f155ff89b39e168c234f9e96f04375a0ed60ef09e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jqvp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:47Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.690314 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:47Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.691937 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.691970 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.691981 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.692000 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.692011 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:47Z","lastTransitionTime":"2025-10-02T10:53:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.711400 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"095cdcdf-1ea0-40da-871a-1223c6737377\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e93788b69b702863377e8cddbd66bfba5faad661fd439898259b9917becf389\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e93788b69b702863377e8cddbd66bfba5faad661fd439898259b9917becf389\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-02T10:53:36Z\\\",\\\"message\\\":\\\"ed as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:c94130be-172c-477c-88c4-40cc7eba30fe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {eb8eef51-1a8d-43f9-ae2e-3b2cc00ded60}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1002 10:53:36.417880 6343 model_client.go:382] Update operations generated as: [{Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1002 10:53:36.417897 6343 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1002 10:53:36.417918 6343 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:dce28c51-c9f1-478b-97c8-7e209d6e7cbe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1002 10:53:36.417978 6343 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:35Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-qmd84_openshift-ovn-kubernetes(095cdcdf-1ea0-40da-871a-1223c6737377)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recurs
iveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qmd84\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:47Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.723323 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aef16d25-482a-4a0f-91e0-b67cdb92c4ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108796d4fc46b7aa41272bcdc4d1ce172e8cf057bde9f21171ff373264b986cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ppwc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50f25f7edd0387d10c6125f930f5e278511723be9a92210788ad55b6ec6e956d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ppwc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2q7k8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:47Z is after 2025-08-24T17:21:41Z" Oct 02 
10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.733583 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6qbg4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f05f5bf0-b0a7-453b-999b-8ef23ca6cc68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-scffz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-scffz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:24Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6qbg4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:47Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.742541 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vxpvq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0781b010-f65f-4e49-9d78-48eda11666fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://feae8da7cb436d7753a9fce729106db7025bd28da1a5d4fb2e67ac07120daac8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbk2x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:12Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vxpvq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:47Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.773039 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9d7cf9d-8522-4a62-aaf3-1579864e50f3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b2e414bb5345d3aae7e32d912acf6d7ede59c1f47e1474caf4736663c733d2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8d3ec78a66bfaf15222ed50fe81f042cbf0b9aa5ecc8fb175b0efc8bc1bccbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a03f92fd7d8f5e7193682a5f1db78e947bea4135976b5db6d87842634f0c4d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edada94eb053469810a747a5d1bf8fe2c2da09
5e5c944dd2cd5ab7846faf5ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebdc8ed0cab8049a9f6220e6069291fd7a0e6c2b97e0442e3221549cbcc889f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:47Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.794246 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.794441 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.794469 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.794501 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.794537 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:47Z","lastTransitionTime":"2025-10-02T10:53:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.798153 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2054147b-696c-471a-9602-db747325cae2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d954f0dc3572c180556ccde9a4c429359575e5418fb786fc5df6987f8c326075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e521d7072220c202109a67fe59ebebe0db8bd877d02d9805324922eb3bf45d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb33b77ecaf413bc6e4aeb0f4358b12cd0f3fa6e01602673a518f5651e4bac6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://414652a9b63f678601226313a42b29f5e146de06f854c1f63ed8d408ea9f3553\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-02T10:53:07Z\\\",\\\"message\\\":\\\"file observer\\\\nW1002 10:53:07.776642 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1002 10:53:07.776950 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1002 10:53:07.778492 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-420939118/tls.crt::/tmp/serving-cert-420939118/tls.key\\\\\\\"\\\\nI1002 10:53:07.918100 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1002 10:53:07.922528 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1002 10:53:07.922551 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1002 10:53:07.922578 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1002 10:53:07.922585 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1002 10:53:07.930960 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1002 10:53:07.930984 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1002 10:53:07.930999 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931026 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931031 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1002 10:53:07.931036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1002 10:53:07.931039 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1002 10:53:07.931042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1002 10:53:07.933737 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://85188b6d7b5901c957d4ea576d5baab74589a2300f15d6210915fd78f6171348\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:47Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.818638 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"acf018d3-a71b-450d-88ef-8f991d5d2219\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf7913b5261e05e8bbfb22a81635ff274a8a28d550d004a5349a9ed84ba5c8b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcc4f647d50346cff07c267ebe645576d032b01e443eaabe4b9aa73b91966e4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdd2ca18244e601967c7e10b1f8f1f02fcf33bfee712d78f7715909c95581b9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0e111ce373594ac0f4bf89b06fd2008eb6554e566575b88e4a4f6beaaa29d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:47Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.839647 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fd4a14eb803afc1b1efecd7c9b8560546554be16de62e8db79752bce61a47dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50292888ad1f8cb9b64dcecfa47015e88b7a560123e50d4a3bf1550b87cfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:47Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.859209 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3288cc82-59a8-408e-8b0e-b5255882b4fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d07b963cb479fc49309f30652bcc2566fd49cd3c7fa53fc79aaf82a9a4b27eab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65d5af548f6048ec95699a1b7175ec8efea96441c335805d704b414eedac00d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha
256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2j8rt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:47Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.904356 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.904477 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.904504 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.904534 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:47 crc kubenswrapper[4783]: I1002 10:53:47.904557 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:47Z","lastTransitionTime":"2025-10-02T10:53:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.007564 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.007956 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.008100 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.008245 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.008364 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:48Z","lastTransitionTime":"2025-10-02T10:53:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.111650 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.111710 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.111727 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.111752 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.111772 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:48Z","lastTransitionTime":"2025-10-02T10:53:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.215201 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.215261 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.215282 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.215306 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.215325 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:48Z","lastTransitionTime":"2025-10-02T10:53:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.318581 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.318641 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.318660 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.318684 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.318703 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:48Z","lastTransitionTime":"2025-10-02T10:53:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.421739 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.421785 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.421802 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.421823 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.421840 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:48Z","lastTransitionTime":"2025-10-02T10:53:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.524684 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.524741 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.524757 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.524779 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.524796 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:48Z","lastTransitionTime":"2025-10-02T10:53:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.544160 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.544181 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6qbg4" Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.544296 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.544376 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 02 10:53:48 crc kubenswrapper[4783]: E1002 10:53:48.544362 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 02 10:53:48 crc kubenswrapper[4783]: E1002 10:53:48.544591 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 02 10:53:48 crc kubenswrapper[4783]: E1002 10:53:48.544709 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 02 10:53:48 crc kubenswrapper[4783]: E1002 10:53:48.544789 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6qbg4" podUID="f05f5bf0-b0a7-453b-999b-8ef23ca6cc68" Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.627733 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.627796 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.627813 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.627837 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.627856 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:48Z","lastTransitionTime":"2025-10-02T10:53:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.730392 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.730483 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.730502 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.730528 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.730547 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:48Z","lastTransitionTime":"2025-10-02T10:53:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.833921 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.834019 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.834043 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.834074 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.834096 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:48Z","lastTransitionTime":"2025-10-02T10:53:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.937536 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.937620 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.937644 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.937716 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:48 crc kubenswrapper[4783]: I1002 10:53:48.937743 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:48Z","lastTransitionTime":"2025-10-02T10:53:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:49 crc kubenswrapper[4783]: I1002 10:53:49.041405 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:49 crc kubenswrapper[4783]: I1002 10:53:49.041507 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:49 crc kubenswrapper[4783]: I1002 10:53:49.041525 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:49 crc kubenswrapper[4783]: I1002 10:53:49.041549 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:49 crc kubenswrapper[4783]: I1002 10:53:49.041566 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:49Z","lastTransitionTime":"2025-10-02T10:53:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:49 crc kubenswrapper[4783]: I1002 10:53:49.145011 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:49 crc kubenswrapper[4783]: I1002 10:53:49.145074 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:49 crc kubenswrapper[4783]: I1002 10:53:49.145095 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:49 crc kubenswrapper[4783]: I1002 10:53:49.145122 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:49 crc kubenswrapper[4783]: I1002 10:53:49.145140 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:49Z","lastTransitionTime":"2025-10-02T10:53:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:49 crc kubenswrapper[4783]: I1002 10:53:49.248386 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:49 crc kubenswrapper[4783]: I1002 10:53:49.248497 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:49 crc kubenswrapper[4783]: I1002 10:53:49.248515 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:49 crc kubenswrapper[4783]: I1002 10:53:49.248538 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:49 crc kubenswrapper[4783]: I1002 10:53:49.248554 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:49Z","lastTransitionTime":"2025-10-02T10:53:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:49 crc kubenswrapper[4783]: I1002 10:53:49.351103 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:49 crc kubenswrapper[4783]: I1002 10:53:49.351138 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:49 crc kubenswrapper[4783]: I1002 10:53:49.351154 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:49 crc kubenswrapper[4783]: I1002 10:53:49.351168 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:49 crc kubenswrapper[4783]: I1002 10:53:49.351179 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:49Z","lastTransitionTime":"2025-10-02T10:53:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:49 crc kubenswrapper[4783]: I1002 10:53:49.454629 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:49 crc kubenswrapper[4783]: I1002 10:53:49.454691 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:49 crc kubenswrapper[4783]: I1002 10:53:49.454708 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:49 crc kubenswrapper[4783]: I1002 10:53:49.454731 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:49 crc kubenswrapper[4783]: I1002 10:53:49.454750 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:49Z","lastTransitionTime":"2025-10-02T10:53:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:49 crc kubenswrapper[4783]: I1002 10:53:49.558127 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:49 crc kubenswrapper[4783]: I1002 10:53:49.558256 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:49 crc kubenswrapper[4783]: I1002 10:53:49.558329 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:49 crc kubenswrapper[4783]: I1002 10:53:49.558368 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:49 crc kubenswrapper[4783]: I1002 10:53:49.558468 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:49Z","lastTransitionTime":"2025-10-02T10:53:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:49 crc kubenswrapper[4783]: I1002 10:53:49.662839 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:49 crc kubenswrapper[4783]: I1002 10:53:49.663626 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:49 crc kubenswrapper[4783]: I1002 10:53:49.663663 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:49 crc kubenswrapper[4783]: I1002 10:53:49.663692 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:49 crc kubenswrapper[4783]: I1002 10:53:49.663710 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:49Z","lastTransitionTime":"2025-10-02T10:53:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:49 crc kubenswrapper[4783]: I1002 10:53:49.766930 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:49 crc kubenswrapper[4783]: I1002 10:53:49.766986 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:49 crc kubenswrapper[4783]: I1002 10:53:49.767003 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:49 crc kubenswrapper[4783]: I1002 10:53:49.767024 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:49 crc kubenswrapper[4783]: I1002 10:53:49.767041 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:49Z","lastTransitionTime":"2025-10-02T10:53:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:49 crc kubenswrapper[4783]: I1002 10:53:49.869779 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:49 crc kubenswrapper[4783]: I1002 10:53:49.869828 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:49 crc kubenswrapper[4783]: I1002 10:53:49.869844 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:49 crc kubenswrapper[4783]: I1002 10:53:49.869884 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:49 crc kubenswrapper[4783]: I1002 10:53:49.869902 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:49Z","lastTransitionTime":"2025-10-02T10:53:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:49 crc kubenswrapper[4783]: I1002 10:53:49.973601 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:49 crc kubenswrapper[4783]: I1002 10:53:49.973900 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:49 crc kubenswrapper[4783]: I1002 10:53:49.974079 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:49 crc kubenswrapper[4783]: I1002 10:53:49.974221 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:49 crc kubenswrapper[4783]: I1002 10:53:49.974365 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:49Z","lastTransitionTime":"2025-10-02T10:53:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:50 crc kubenswrapper[4783]: I1002 10:53:50.077202 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:50 crc kubenswrapper[4783]: I1002 10:53:50.077388 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:50 crc kubenswrapper[4783]: I1002 10:53:50.077438 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:50 crc kubenswrapper[4783]: I1002 10:53:50.077469 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:50 crc kubenswrapper[4783]: I1002 10:53:50.077492 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:50Z","lastTransitionTime":"2025-10-02T10:53:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:50 crc kubenswrapper[4783]: I1002 10:53:50.180384 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:50 crc kubenswrapper[4783]: I1002 10:53:50.180470 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:50 crc kubenswrapper[4783]: I1002 10:53:50.180483 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:50 crc kubenswrapper[4783]: I1002 10:53:50.180497 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:50 crc kubenswrapper[4783]: I1002 10:53:50.180508 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:50Z","lastTransitionTime":"2025-10-02T10:53:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:50 crc kubenswrapper[4783]: I1002 10:53:50.282802 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:50 crc kubenswrapper[4783]: I1002 10:53:50.282854 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:50 crc kubenswrapper[4783]: I1002 10:53:50.282870 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:50 crc kubenswrapper[4783]: I1002 10:53:50.282894 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:50 crc kubenswrapper[4783]: I1002 10:53:50.282910 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:50Z","lastTransitionTime":"2025-10-02T10:53:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:50 crc kubenswrapper[4783]: I1002 10:53:50.386520 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:50 crc kubenswrapper[4783]: I1002 10:53:50.386591 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:50 crc kubenswrapper[4783]: I1002 10:53:50.386611 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:50 crc kubenswrapper[4783]: I1002 10:53:50.386638 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:50 crc kubenswrapper[4783]: I1002 10:53:50.386656 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:50Z","lastTransitionTime":"2025-10-02T10:53:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:50 crc kubenswrapper[4783]: I1002 10:53:50.490142 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:50 crc kubenswrapper[4783]: I1002 10:53:50.490204 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:50 crc kubenswrapper[4783]: I1002 10:53:50.490219 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:50 crc kubenswrapper[4783]: I1002 10:53:50.490250 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:50 crc kubenswrapper[4783]: I1002 10:53:50.490265 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:50Z","lastTransitionTime":"2025-10-02T10:53:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Oct 02 10:53:50 crc kubenswrapper[4783]: I1002 10:53:50.544127 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 02 10:53:50 crc kubenswrapper[4783]: I1002 10:53:50.544245 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 02 10:53:50 crc kubenswrapper[4783]: E1002 10:53:50.544276 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 02 10:53:50 crc kubenswrapper[4783]: I1002 10:53:50.544330 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6qbg4"
Oct 02 10:53:50 crc kubenswrapper[4783]: E1002 10:53:50.544396 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 02 10:53:50 crc kubenswrapper[4783]: I1002 10:53:50.544456 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 02 10:53:50 crc kubenswrapper[4783]: E1002 10:53:50.544591 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 02 10:53:50 crc kubenswrapper[4783]: E1002 10:53:50.544747 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6qbg4" podUID="f05f5bf0-b0a7-453b-999b-8ef23ca6cc68"
Oct 02 10:53:50 crc kubenswrapper[4783]: I1002 10:53:50.545712 4783 scope.go:117] "RemoveContainer" containerID="9e93788b69b702863377e8cddbd66bfba5faad661fd439898259b9917becf389"
Oct 02 10:53:50 crc kubenswrapper[4783]: E1002 10:53:50.545905 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-qmd84_openshift-ovn-kubernetes(095cdcdf-1ea0-40da-871a-1223c6737377)\"" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" podUID="095cdcdf-1ea0-40da-871a-1223c6737377"
Oct 02 10:53:50 crc kubenswrapper[4783]: I1002 10:53:50.597179 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:50 crc kubenswrapper[4783]: I1002 10:53:50.597236 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:50 crc kubenswrapper[4783]: I1002 10:53:50.597253 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:50 crc kubenswrapper[4783]: I1002 10:53:50.597276 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:50 crc kubenswrapper[4783]: I1002 10:53:50.597293 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:50Z","lastTransitionTime":"2025-10-02T10:53:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:50 crc kubenswrapper[4783]: I1002 10:53:50.699655 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:50 crc kubenswrapper[4783]: I1002 10:53:50.699703 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:50 crc kubenswrapper[4783]: I1002 10:53:50.699720 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:50 crc kubenswrapper[4783]: I1002 10:53:50.699743 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:50 crc kubenswrapper[4783]: I1002 10:53:50.699760 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:50Z","lastTransitionTime":"2025-10-02T10:53:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Oct 02 10:53:50 crc kubenswrapper[4783]: I1002 10:53:50.805065 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:50 crc kubenswrapper[4783]: I1002 10:53:50.805115 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:50 crc kubenswrapper[4783]: I1002 10:53:50.805126 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:50 crc kubenswrapper[4783]: I1002 10:53:50.805147 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:50 crc kubenswrapper[4783]: I1002 10:53:50.805161 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:50Z","lastTransitionTime":"2025-10-02T10:53:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:50 crc kubenswrapper[4783]: I1002 10:53:50.912907 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:50 crc kubenswrapper[4783]: I1002 10:53:50.912974 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:50 crc kubenswrapper[4783]: I1002 10:53:50.912999 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:50 crc kubenswrapper[4783]: I1002 10:53:50.913026 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:50 crc kubenswrapper[4783]: I1002 10:53:50.913047 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:50Z","lastTransitionTime":"2025-10-02T10:53:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:51 crc kubenswrapper[4783]: I1002 10:53:51.016239 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:51 crc kubenswrapper[4783]: I1002 10:53:51.016303 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:51 crc kubenswrapper[4783]: I1002 10:53:51.016316 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:51 crc kubenswrapper[4783]: I1002 10:53:51.016337 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:51 crc kubenswrapper[4783]: I1002 10:53:51.016350 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:51Z","lastTransitionTime":"2025-10-02T10:53:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:51 crc kubenswrapper[4783]: I1002 10:53:51.119581 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:51 crc kubenswrapper[4783]: I1002 10:53:51.119640 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:51 crc kubenswrapper[4783]: I1002 10:53:51.119656 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:51 crc kubenswrapper[4783]: I1002 10:53:51.119681 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:51 crc kubenswrapper[4783]: I1002 10:53:51.119700 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:51Z","lastTransitionTime":"2025-10-02T10:53:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:51 crc kubenswrapper[4783]: I1002 10:53:51.223313 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:51 crc kubenswrapper[4783]: I1002 10:53:51.223359 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:51 crc kubenswrapper[4783]: I1002 10:53:51.223377 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:51 crc kubenswrapper[4783]: I1002 10:53:51.223397 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:51 crc kubenswrapper[4783]: I1002 10:53:51.223433 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:51Z","lastTransitionTime":"2025-10-02T10:53:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:51 crc kubenswrapper[4783]: I1002 10:53:51.326445 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:51 crc kubenswrapper[4783]: I1002 10:53:51.326478 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:51 crc kubenswrapper[4783]: I1002 10:53:51.326487 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:51 crc kubenswrapper[4783]: I1002 10:53:51.326504 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:51 crc kubenswrapper[4783]: I1002 10:53:51.326513 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:51Z","lastTransitionTime":"2025-10-02T10:53:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:51 crc kubenswrapper[4783]: I1002 10:53:51.431023 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:51 crc kubenswrapper[4783]: I1002 10:53:51.431478 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:51 crc kubenswrapper[4783]: I1002 10:53:51.431726 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:51 crc kubenswrapper[4783]: I1002 10:53:51.431921 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:51 crc kubenswrapper[4783]: I1002 10:53:51.432159 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:51Z","lastTransitionTime":"2025-10-02T10:53:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:51 crc kubenswrapper[4783]: I1002 10:53:51.534942 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:51 crc kubenswrapper[4783]: I1002 10:53:51.534999 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:51 crc kubenswrapper[4783]: I1002 10:53:51.535014 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:51 crc kubenswrapper[4783]: I1002 10:53:51.535034 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:51 crc kubenswrapper[4783]: I1002 10:53:51.535047 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:51Z","lastTransitionTime":"2025-10-02T10:53:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:51 crc kubenswrapper[4783]: I1002 10:53:51.637626 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:51 crc kubenswrapper[4783]: I1002 10:53:51.637694 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:51 crc kubenswrapper[4783]: I1002 10:53:51.637711 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:51 crc kubenswrapper[4783]: I1002 10:53:51.637734 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:51 crc kubenswrapper[4783]: I1002 10:53:51.637751 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:51Z","lastTransitionTime":"2025-10-02T10:53:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:51 crc kubenswrapper[4783]: I1002 10:53:51.740091 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:51 crc kubenswrapper[4783]: I1002 10:53:51.740150 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:51 crc kubenswrapper[4783]: I1002 10:53:51.740167 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:51 crc kubenswrapper[4783]: I1002 10:53:51.740192 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:51 crc kubenswrapper[4783]: I1002 10:53:51.740209 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:51Z","lastTransitionTime":"2025-10-02T10:53:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:51 crc kubenswrapper[4783]: I1002 10:53:51.842489 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:51 crc kubenswrapper[4783]: I1002 10:53:51.842539 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:51 crc kubenswrapper[4783]: I1002 10:53:51.842549 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:51 crc kubenswrapper[4783]: I1002 10:53:51.842564 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:51 crc kubenswrapper[4783]: I1002 10:53:51.842574 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:51Z","lastTransitionTime":"2025-10-02T10:53:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:51 crc kubenswrapper[4783]: I1002 10:53:51.944323 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:51 crc kubenswrapper[4783]: I1002 10:53:51.944359 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:51 crc kubenswrapper[4783]: I1002 10:53:51.944371 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:51 crc kubenswrapper[4783]: I1002 10:53:51.944387 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:51 crc kubenswrapper[4783]: I1002 10:53:51.944401 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:51Z","lastTransitionTime":"2025-10-02T10:53:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.046997 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.047081 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.047093 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.047110 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.047123 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:52Z","lastTransitionTime":"2025-10-02T10:53:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.149866 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.149910 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.149926 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.149947 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.149963 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:52Z","lastTransitionTime":"2025-10-02T10:53:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.251944 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.251976 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.251986 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.252000 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.252011 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:52Z","lastTransitionTime":"2025-10-02T10:53:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.355138 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.355237 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.355255 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.355280 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.355300 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:52Z","lastTransitionTime":"2025-10-02T10:53:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.457452 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.457495 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.457532 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.457546 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.457557 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:52Z","lastTransitionTime":"2025-10-02T10:53:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.544084 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.544154 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 02 10:53:52 crc kubenswrapper[4783]: E1002 10:53:52.544303 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.544332 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.544372 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6qbg4"
Oct 02 10:53:52 crc kubenswrapper[4783]: E1002 10:53:52.544456 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 02 10:53:52 crc kubenswrapper[4783]: E1002 10:53:52.544497 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6qbg4" podUID="f05f5bf0-b0a7-453b-999b-8ef23ca6cc68"
Oct 02 10:53:52 crc kubenswrapper[4783]: E1002 10:53:52.544670 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.560036 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.560070 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.560078 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.560090 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.560098 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:52Z","lastTransitionTime":"2025-10-02T10:53:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.662448 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.662486 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.662512 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.662528 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.662539 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:52Z","lastTransitionTime":"2025-10-02T10:53:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.764714 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.764753 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.764765 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.764779 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.764790 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:52Z","lastTransitionTime":"2025-10-02T10:53:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.867566 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.867594 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.867603 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.867615 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.867625 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:52Z","lastTransitionTime":"2025-10-02T10:53:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.969771 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.969808 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.969820 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.969833 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:52 crc kubenswrapper[4783]: I1002 10:53:52.969841 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:52Z","lastTransitionTime":"2025-10-02T10:53:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:53 crc kubenswrapper[4783]: I1002 10:53:53.072523 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:53 crc kubenswrapper[4783]: I1002 10:53:53.072566 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:53 crc kubenswrapper[4783]: I1002 10:53:53.072576 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:53 crc kubenswrapper[4783]: I1002 10:53:53.072591 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:53 crc kubenswrapper[4783]: I1002 10:53:53.072605 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:53Z","lastTransitionTime":"2025-10-02T10:53:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:53 crc kubenswrapper[4783]: I1002 10:53:53.174748 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:53 crc kubenswrapper[4783]: I1002 10:53:53.174796 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:53 crc kubenswrapper[4783]: I1002 10:53:53.174809 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:53 crc kubenswrapper[4783]: I1002 10:53:53.174841 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:53 crc kubenswrapper[4783]: I1002 10:53:53.174854 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:53Z","lastTransitionTime":"2025-10-02T10:53:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:53 crc kubenswrapper[4783]: I1002 10:53:53.279378 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:53 crc kubenswrapper[4783]: I1002 10:53:53.279439 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:53 crc kubenswrapper[4783]: I1002 10:53:53.279453 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:53 crc kubenswrapper[4783]: I1002 10:53:53.279469 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:53 crc kubenswrapper[4783]: I1002 10:53:53.279486 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:53Z","lastTransitionTime":"2025-10-02T10:53:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:53 crc kubenswrapper[4783]: I1002 10:53:53.381801 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:53 crc kubenswrapper[4783]: I1002 10:53:53.381860 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:53 crc kubenswrapper[4783]: I1002 10:53:53.381870 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:53 crc kubenswrapper[4783]: I1002 10:53:53.381887 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:53 crc kubenswrapper[4783]: I1002 10:53:53.381897 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:53Z","lastTransitionTime":"2025-10-02T10:53:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:53 crc kubenswrapper[4783]: I1002 10:53:53.484786 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:53 crc kubenswrapper[4783]: I1002 10:53:53.485027 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:53 crc kubenswrapper[4783]: I1002 10:53:53.485151 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:53 crc kubenswrapper[4783]: I1002 10:53:53.485289 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:53 crc kubenswrapper[4783]: I1002 10:53:53.485369 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:53Z","lastTransitionTime":"2025-10-02T10:53:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:53 crc kubenswrapper[4783]: I1002 10:53:53.587215 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:53 crc kubenswrapper[4783]: I1002 10:53:53.587259 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:53 crc kubenswrapper[4783]: I1002 10:53:53.587270 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:53 crc kubenswrapper[4783]: I1002 10:53:53.587287 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:53 crc kubenswrapper[4783]: I1002 10:53:53.587298 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:53Z","lastTransitionTime":"2025-10-02T10:53:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:53 crc kubenswrapper[4783]: I1002 10:53:53.689171 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:53 crc kubenswrapper[4783]: I1002 10:53:53.689219 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:53 crc kubenswrapper[4783]: I1002 10:53:53.689232 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:53 crc kubenswrapper[4783]: I1002 10:53:53.689251 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:53 crc kubenswrapper[4783]: I1002 10:53:53.689263 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:53Z","lastTransitionTime":"2025-10-02T10:53:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:53 crc kubenswrapper[4783]: I1002 10:53:53.791700 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:53 crc kubenswrapper[4783]: I1002 10:53:53.791735 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:53 crc kubenswrapper[4783]: I1002 10:53:53.791748 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:53 crc kubenswrapper[4783]: I1002 10:53:53.791775 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:53 crc kubenswrapper[4783]: I1002 10:53:53.791788 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:53Z","lastTransitionTime":"2025-10-02T10:53:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:53 crc kubenswrapper[4783]: I1002 10:53:53.894257 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:53 crc kubenswrapper[4783]: I1002 10:53:53.894299 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:53 crc kubenswrapper[4783]: I1002 10:53:53.894312 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:53 crc kubenswrapper[4783]: I1002 10:53:53.894328 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:53 crc kubenswrapper[4783]: I1002 10:53:53.894340 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:53Z","lastTransitionTime":"2025-10-02T10:53:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:53 crc kubenswrapper[4783]: I1002 10:53:53.996241 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:53 crc kubenswrapper[4783]: I1002 10:53:53.996279 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:53 crc kubenswrapper[4783]: I1002 10:53:53.996288 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:53 crc kubenswrapper[4783]: I1002 10:53:53.996309 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:53 crc kubenswrapper[4783]: I1002 10:53:53.996325 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:53Z","lastTransitionTime":"2025-10-02T10:53:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:54 crc kubenswrapper[4783]: I1002 10:53:54.098962 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:54 crc kubenswrapper[4783]: I1002 10:53:54.099002 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:54 crc kubenswrapper[4783]: I1002 10:53:54.099013 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:54 crc kubenswrapper[4783]: I1002 10:53:54.099030 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:54 crc kubenswrapper[4783]: I1002 10:53:54.099055 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:54Z","lastTransitionTime":"2025-10-02T10:53:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:54 crc kubenswrapper[4783]: I1002 10:53:54.200786 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:54 crc kubenswrapper[4783]: I1002 10:53:54.200837 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:54 crc kubenswrapper[4783]: I1002 10:53:54.200846 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:54 crc kubenswrapper[4783]: I1002 10:53:54.200858 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:54 crc kubenswrapper[4783]: I1002 10:53:54.200866 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:54Z","lastTransitionTime":"2025-10-02T10:53:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:54 crc kubenswrapper[4783]: I1002 10:53:54.303395 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:54 crc kubenswrapper[4783]: I1002 10:53:54.303462 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:54 crc kubenswrapper[4783]: I1002 10:53:54.303474 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:54 crc kubenswrapper[4783]: I1002 10:53:54.303491 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:54 crc kubenswrapper[4783]: I1002 10:53:54.303505 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:54Z","lastTransitionTime":"2025-10-02T10:53:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:54 crc kubenswrapper[4783]: I1002 10:53:54.406723 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:54 crc kubenswrapper[4783]: I1002 10:53:54.406772 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:54 crc kubenswrapper[4783]: I1002 10:53:54.406787 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:54 crc kubenswrapper[4783]: I1002 10:53:54.406807 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:54 crc kubenswrapper[4783]: I1002 10:53:54.406820 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:54Z","lastTransitionTime":"2025-10-02T10:53:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Oct 02 10:53:54 crc kubenswrapper[4783]: I1002 10:53:54.509481 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:54 crc kubenswrapper[4783]: I1002 10:53:54.509568 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:54 crc kubenswrapper[4783]: I1002 10:53:54.509589 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:54 crc kubenswrapper[4783]: I1002 10:53:54.509615 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:54 crc kubenswrapper[4783]: I1002 10:53:54.509636 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:54Z","lastTransitionTime":"2025-10-02T10:53:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:54 crc kubenswrapper[4783]: I1002 10:53:54.544404 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 02 10:53:54 crc kubenswrapper[4783]: I1002 10:53:54.544406 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6qbg4"
Oct 02 10:53:54 crc kubenswrapper[4783]: I1002 10:53:54.544406 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 02 10:53:54 crc kubenswrapper[4783]: E1002 10:53:54.544532 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 02 10:53:54 crc kubenswrapper[4783]: I1002 10:53:54.544561 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 02 10:53:54 crc kubenswrapper[4783]: E1002 10:53:54.544745 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6qbg4" podUID="f05f5bf0-b0a7-453b-999b-8ef23ca6cc68"
Oct 02 10:53:54 crc kubenswrapper[4783]: E1002 10:53:54.544770 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 02 10:53:54 crc kubenswrapper[4783]: E1002 10:53:54.544815 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 02 10:53:54 crc kubenswrapper[4783]: I1002 10:53:54.612783 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:54 crc kubenswrapper[4783]: I1002 10:53:54.612824 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:54 crc kubenswrapper[4783]: I1002 10:53:54.612835 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:54 crc kubenswrapper[4783]: I1002 10:53:54.612851 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:54 crc kubenswrapper[4783]: I1002 10:53:54.612862 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:54Z","lastTransitionTime":"2025-10-02T10:53:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:54 crc kubenswrapper[4783]: I1002 10:53:54.715454 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:54 crc kubenswrapper[4783]: I1002 10:53:54.715512 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:54 crc kubenswrapper[4783]: I1002 10:53:54.715521 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:54 crc kubenswrapper[4783]: I1002 10:53:54.715535 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:54 crc kubenswrapper[4783]: I1002 10:53:54.715544 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:54Z","lastTransitionTime":"2025-10-02T10:53:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Oct 02 10:53:54 crc kubenswrapper[4783]: I1002 10:53:54.817519 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:54 crc kubenswrapper[4783]: I1002 10:53:54.818365 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:54 crc kubenswrapper[4783]: I1002 10:53:54.818494 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:54 crc kubenswrapper[4783]: I1002 10:53:54.818599 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:54 crc kubenswrapper[4783]: I1002 10:53:54.818702 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:54Z","lastTransitionTime":"2025-10-02T10:53:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:54 crc kubenswrapper[4783]: I1002 10:53:54.920786 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:54 crc kubenswrapper[4783]: I1002 10:53:54.920822 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:54 crc kubenswrapper[4783]: I1002 10:53:54.920833 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:54 crc kubenswrapper[4783]: I1002 10:53:54.920849 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:54 crc kubenswrapper[4783]: I1002 10:53:54.920862 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:54Z","lastTransitionTime":"2025-10-02T10:53:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.023162 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.023200 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.023209 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.023224 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.023234 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:55Z","lastTransitionTime":"2025-10-02T10:53:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.125821 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.125913 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.125933 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.125959 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.126008 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:55Z","lastTransitionTime":"2025-10-02T10:53:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.228706 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.228782 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.228796 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.228812 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.228850 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:55Z","lastTransitionTime":"2025-10-02T10:53:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.330977 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.331025 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.331037 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.331055 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.331067 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:55Z","lastTransitionTime":"2025-10-02T10:53:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.351615 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.351678 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.351691 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.351711 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.351722 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:55Z","lastTransitionTime":"2025-10-02T10:53:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:55 crc kubenswrapper[4783]: E1002 10:53:55.362468 4783 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc187a47-fc71-4069-a609-1fd638044aa7\\\",\\\"systemUUID\\\":\\\"b9763463-2b4a-4924-bf4e-8df5af678b9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:55Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.365607 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.365635 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.365644 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.365657 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.365665 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:55Z","lastTransitionTime":"2025-10-02T10:53:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:55 crc kubenswrapper[4783]: E1002 10:53:55.376564 4783 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc187a47-fc71-4069-a609-1fd638044aa7\\\",\\\"systemUUID\\\":\\\"b9763463-2b4a-4924-bf4e-8df5af678b9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:55Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.379656 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.379691 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
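Every retry above fails for the same root cause: the node.network-node-identity.openshift.io webhook at https://127.0.0.1:9743 presents a certificate that expired on 2025-08-24T17:21:41Z, well before the current time in the log. A minimal Go sketch, not part of the cluster tooling, that dials that endpoint and prints the served certificate's validity window to confirm the expiry the kubelet reports:

package main

import (
	"crypto/tls"
	"fmt"
	"time"
)

func main() {
	// Dial the webhook endpoint named in the kubelet error. InsecureSkipVerify
	// lets the handshake complete even with an expired certificate, so we can
	// read its validity window instead of failing the way the kubelet does.
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer conn.Close()

	cert := conn.ConnectionState().PeerCertificates[0]
	fmt.Println("subject:  ", cert.Subject)
	fmt.Println("notBefore:", cert.NotBefore.Format(time.RFC3339))
	fmt.Println("notAfter: ", cert.NotAfter.Format(time.RFC3339))
	if time.Now().After(cert.NotAfter) {
		fmt.Println("certificate is expired, matching the x509 error in the log")
	}
}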
event="NodeHasNoDiskPressure" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.379702 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.379718 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.379728 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:55Z","lastTransitionTime":"2025-10-02T10:53:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:55 crc kubenswrapper[4783]: E1002 10:53:55.393886 4783 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc187a47-fc71-4069-a609-1fd638044aa7\\\",\\\"systemUUID\\\":\\\"b9763463-2b4a-4924-bf4e-8df5af678b9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:55Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.399445 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.399491 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
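The Ready=False condition inside each rejected patch repeats the other standing fault: /etc/kubernetes/cni/net.d/ contains no CNI configuration file, so the kubelet keeps reporting NetworkReady=false. A minimal Go sketch, assuming only the directory path quoted verbatim in the log, that checks whether the network provider has written a config there yet:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// Path taken verbatim from the NodeNotReady condition message.
	dir := "/etc/kubernetes/cni/net.d"
	entries, err := os.ReadDir(dir)
	if err != nil {
		fmt.Println("cannot read CNI config dir:", err)
		return
	}
	found := false
	for _, e := range entries {
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			fmt.Println("found CNI config:", e.Name())
			found = true
		}
	}
	if !found {
		fmt.Println("no CNI configuration files; the network provider has not written its config yet")
	}
}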
event="NodeHasNoDiskPressure" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.399503 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.399520 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.399532 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:55Z","lastTransitionTime":"2025-10-02T10:53:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:55 crc kubenswrapper[4783]: E1002 10:53:55.411192 4783 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc187a47-fc71-4069-a609-1fd638044aa7\\\",\\\"systemUUID\\\":\\\"b9763463-2b4a-4924-bf4e-8df5af678b9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:55Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.414771 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.414808 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.414818 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.414850 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.414860 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:55Z","lastTransitionTime":"2025-10-02T10:53:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:55 crc kubenswrapper[4783]: E1002 10:53:55.426799 4783 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:55Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:55Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:53:55Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:55Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc187a47-fc71-4069-a609-1fd638044aa7\\\",\\\"systemUUID\\\":\\\"b9763463-2b4a-4924-bf4e-8df5af678b9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:55Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:55 crc kubenswrapper[4783]: E1002 10:53:55.426907 4783 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.433121 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.433154 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.433164 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.433178 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.433188 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:55Z","lastTransitionTime":"2025-10-02T10:53:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.536436 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.536481 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.536493 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.536510 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.536522 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:55Z","lastTransitionTime":"2025-10-02T10:53:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.639598 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.639661 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.639673 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.639714 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.639728 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:55Z","lastTransitionTime":"2025-10-02T10:53:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.742549 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.742613 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.742632 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.742657 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.742673 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:55Z","lastTransitionTime":"2025-10-02T10:53:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.845507 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.845542 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.845556 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.845574 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.845586 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:55Z","lastTransitionTime":"2025-10-02T10:53:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.948397 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.948434 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.948442 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.948455 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:55 crc kubenswrapper[4783]: I1002 10:53:55.948465 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:55Z","lastTransitionTime":"2025-10-02T10:53:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.051677 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.051752 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.051769 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.051799 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.051817 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:56Z","lastTransitionTime":"2025-10-02T10:53:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.154349 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.154442 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.154464 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.154491 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.154515 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:56Z","lastTransitionTime":"2025-10-02T10:53:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.257462 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.257533 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.257551 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.257573 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.257589 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:56Z","lastTransitionTime":"2025-10-02T10:53:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.360338 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.360679 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.360855 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.361024 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.361160 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:56Z","lastTransitionTime":"2025-10-02T10:53:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.374964 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f05f5bf0-b0a7-453b-999b-8ef23ca6cc68-metrics-certs\") pod \"network-metrics-daemon-6qbg4\" (UID: \"f05f5bf0-b0a7-453b-999b-8ef23ca6cc68\") " pod="openshift-multus/network-metrics-daemon-6qbg4" Oct 02 10:53:56 crc kubenswrapper[4783]: E1002 10:53:56.375082 4783 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Oct 02 10:53:56 crc kubenswrapper[4783]: E1002 10:53:56.375131 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f05f5bf0-b0a7-453b-999b-8ef23ca6cc68-metrics-certs podName:f05f5bf0-b0a7-453b-999b-8ef23ca6cc68 nodeName:}" failed. No retries permitted until 2025-10-02 10:54:28.375115299 +0000 UTC m=+101.691309560 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f05f5bf0-b0a7-453b-999b-8ef23ca6cc68-metrics-certs") pod "network-metrics-daemon-6qbg4" (UID: "f05f5bf0-b0a7-453b-999b-8ef23ca6cc68") : object "openshift-multus"/"metrics-daemon-secret" not registered Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.463797 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.463986 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.464071 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.464137 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.464202 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:56Z","lastTransitionTime":"2025-10-02T10:53:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.544459 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.544513 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.544532 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 02 10:53:56 crc kubenswrapper[4783]: E1002 10:53:56.544572 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.544600 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6qbg4" Oct 02 10:53:56 crc kubenswrapper[4783]: E1002 10:53:56.544651 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 02 10:53:56 crc kubenswrapper[4783]: E1002 10:53:56.544735 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6qbg4" podUID="f05f5bf0-b0a7-453b-999b-8ef23ca6cc68" Oct 02 10:53:56 crc kubenswrapper[4783]: E1002 10:53:56.544811 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.566942 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.566987 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.567004 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.567023 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.567037 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:56Z","lastTransitionTime":"2025-10-02T10:53:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.669494 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.669542 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.669556 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.669574 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.669586 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:56Z","lastTransitionTime":"2025-10-02T10:53:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.772725 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.772764 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.772775 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.772795 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.772808 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:56Z","lastTransitionTime":"2025-10-02T10:53:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.875873 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.875901 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.875909 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.875923 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.875933 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:56Z","lastTransitionTime":"2025-10-02T10:53:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.978376 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.978456 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.978473 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.978495 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:56 crc kubenswrapper[4783]: I1002 10:53:56.978510 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:56Z","lastTransitionTime":"2025-10-02T10:53:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.081122 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.081164 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.081176 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.081194 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.081205 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:57Z","lastTransitionTime":"2025-10-02T10:53:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.184124 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.184161 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.184172 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.184187 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.184199 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:57Z","lastTransitionTime":"2025-10-02T10:53:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.286307 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.286344 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.286353 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.286371 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.286385 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:57Z","lastTransitionTime":"2025-10-02T10:53:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.388184 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.388229 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.388242 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.388264 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.388279 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:57Z","lastTransitionTime":"2025-10-02T10:53:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.490907 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.490969 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.490985 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.491039 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.491054 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:57Z","lastTransitionTime":"2025-10-02T10:53:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.560468 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"39fe3cc2-3136-44ab-8039-15da972eedad\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c3a7b7bb194e7cc66b1765fc3dc2f4d9c6aafa562cb9fcde19f862fda5e34282\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5dc6c37e90ca8574d11db2bcc1137a63af565614eccef178d05b863c6a839b07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://082040f0d8c15812040aa0d45d22f327c303d526e72d35524470348c34b4e74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1921ea86697ee9de43cd3e9b3b6673ad627b9997cd9f4738e8bd86428e5ef02b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1921ea86697ee9de43cd3e9b3b6673ad627b9997cd9f4738e8bd86428e5ef02b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:57Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.575201 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:57Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.585119 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:57Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.593502 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.593544 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.593556 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.593573 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.593584 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:57Z","lastTransitionTime":"2025-10-02T10:53:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.596331 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dec06359fbdd507aff2ffef8bd1114892cd9e4732b0f21139fd60b5765bc074\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:57Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.609463 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wmn4g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6c8d5bc-163f-401f-bdc5-4625112dced9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ec2f190b5004026dfc36e595ca483d08c1dc07796317d28f9dfd252e391fe7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6gs6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wmn4g\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:57Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.623000 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ks7tf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cbc7f6d-232e-484d-9afc-7111e428762c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160ecc09cabb40cb5867a449f2bb1707da2367f5b10cc4f53276b372b2641bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fj8jd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ks7tf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:57Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.632736 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a3fab4917d2aaaba6208c2c5c4dd19b845e8d52e5e350566d8a78cfd581604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:57Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.644241 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jqvp2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"94681624-a0a9-443a-9b4d-715182399740\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b4ea5dd6b4b12458d54c54f155ff89b39e168c234f9e96f04375a0ed60ef09e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jqvp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:57Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.654322 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:57Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.679219 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"095cdcdf-1ea0-40da-871a-1223c6737377\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e93788b69b702863377e8cddbd66bfba5faad66
1fd439898259b9917becf389\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e93788b69b702863377e8cddbd66bfba5faad661fd439898259b9917becf389\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-02T10:53:36Z\\\",\\\"message\\\":\\\"ed as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:c94130be-172c-477c-88c4-40cc7eba30fe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {eb8eef51-1a8d-43f9-ae2e-3b2cc00ded60}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1002 10:53:36.417880 6343 model_client.go:382] Update operations generated as: [{Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1002 10:53:36.417897 6343 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1002 10:53:36.417918 6343 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:dce28c51-c9f1-478b-97c8-7e209d6e7cbe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1002 10:53:36.417978 6343 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:35Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-qmd84_openshift-ovn-kubernetes(095cdcdf-1ea0-40da-871a-1223c6737377)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qmd84\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:57Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.688775 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aef16d25-482a-4a0f-91e0-b67cdb92c4ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108796d4fc46b7aa41272bcdc4d1ce172e8cf057bde9f21171ff373264b986cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ppwc7
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50f25f7edd0387d10c6125f930f5e278511723be9a92210788ad55b6ec6e956d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ppwc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2q7k8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:57Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.696716 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.696763 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.696777 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.696799 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.696814 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:57Z","lastTransitionTime":"2025-10-02T10:53:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.697030 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6qbg4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f05f5bf0-b0a7-453b-999b-8ef23ca6cc68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-scffz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-scffz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:24Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6qbg4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:57Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.713961 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9d7cf9d-8522-4a62-aaf3-1579864e50f3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b2e414bb5345d3aae7e32d912acf6d7ede59c1f47e1474caf4736663c733d2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8d3ec78a66bfaf15222ed50fe81f042cbf0b9aa5ecc8fb175b0efc8bc1bccbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a03f92fd7d8f5e7193682a5f1db78e947bea4135976b5db6d87842634f0c4d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edada94eb053469810a747a5d1bf8fe2c2da09
5e5c944dd2cd5ab7846faf5ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebdc8ed0cab8049a9f6220e6069291fd7a0e6c2b97e0442e3221549cbcc889f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:57Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.729608 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2054147b-696c-471a-9602-db747325cae2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d954f0dc3572c180556ccde9a4c429359575e5418fb786fc5df6987f8c326075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e521d7072220c202109a67fe59ebebe
0db8bd877d02d9805324922eb3bf45d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb33b77ecaf413bc6e4aeb0f4358b12cd0f3fa6e01602673a518f5651e4bac6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://414652a9b63f678601226313a42b29f5e146de06f854c1f63ed8d408ea9f3553\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-02T10:53:07Z\\\",\\\"message\\\":\\\"file observer\\\\nW1002 10:53:07.776642 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1002 10:53:07.776950 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1002 10:53:07.778492 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-420939118/tls.crt::/tmp/serving-cert-420939118/tls.key\\\\\\\"\\\\nI1002 10:53:07.918100 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1002 10:53:07.922528 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1002 10:53:07.922551 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1002 10:53:07.922578 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1002 10:53:07.922585 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1002 10:53:07.930960 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1002 10:53:07.930984 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1002 10:53:07.930999 1 
secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931026 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931031 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1002 10:53:07.931036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1002 10:53:07.931039 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1002 10:53:07.931042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1002 10:53:07.933737 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://85188b6d7b5901c957d4ea576d5baab74589a2300f15d6210915fd78f6171348\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:57Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.744716 4783 status_manager.go:875] 
"Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"acf018d3-a71b-450d-88ef-8f991d5d2219\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf7913b5261e05e8bbfb22a81635ff274a8a28d550d004a5349a9ed84ba5c8b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcc4f647d50346cff07c267ebe645576d032b01e443eaabe4b9aa73b91966e4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdd2ca18244e601967c7e10b1f8f1f02fcf33bfee712d78f7715909c95581b9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0e111ce373594a
c0f4bf89b06fd2008eb6554e566575b88e4a4f6beaaa29d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:57Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.756016 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fd4a14eb803afc1b1efecd7c9b8560546554be16de62e8db79752bce61a47dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50292888ad1f8cb9b64dcecfa47015e88b7a560123e50d4a3bf1550b87cfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453
265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:57Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.765076 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3288cc82-59a8-408e-8b0e-b5255882b4fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d07b963cb479fc49309f30652bcc2566fd49cd3c7fa53fc79aaf82a9a4b27eab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65d5af548f6048ec95699a1b7175ec8efea96441c335805d704b414eedac00d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba
96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2j8rt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:57Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.774922 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vxpvq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0781b010-f65f-4e49-9d78-48eda11666fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://feae8da7cb436d7753a9fce729106db7025bd28da1a5d4fb2e67ac07120daac8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbk2x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running
\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:12Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vxpvq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:57Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.798830 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.798860 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.798868 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.798881 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.798889 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:57Z","lastTransitionTime":"2025-10-02T10:53:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.900633 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.900678 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.900704 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.900717 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.900727 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:57Z","lastTransitionTime":"2025-10-02T10:53:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.999455 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-wmn4g_f6c8d5bc-163f-401f-bdc5-4625112dced9/kube-multus/0.log" Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.999524 4783 generic.go:334] "Generic (PLEG): container finished" podID="f6c8d5bc-163f-401f-bdc5-4625112dced9" containerID="66ec2f190b5004026dfc36e595ca483d08c1dc07796317d28f9dfd252e391fe7" exitCode=1 Oct 02 10:53:57 crc kubenswrapper[4783]: I1002 10:53:57.999556 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-wmn4g" event={"ID":"f6c8d5bc-163f-401f-bdc5-4625112dced9","Type":"ContainerDied","Data":"66ec2f190b5004026dfc36e595ca483d08c1dc07796317d28f9dfd252e391fe7"} Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.000051 4783 scope.go:117] "RemoveContainer" containerID="66ec2f190b5004026dfc36e595ca483d08c1dc07796317d28f9dfd252e391fe7" Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.004778 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.004808 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.004817 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.004831 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.004839 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:58Z","lastTransitionTime":"2025-10-02T10:53:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.012779 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6qbg4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f05f5bf0-b0a7-453b-999b-8ef23ca6cc68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-scffz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-scffz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:24Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6qbg4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:58Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.024501 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:58Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.041010 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"095cdcdf-1ea0-40da-871a-1223c6737377\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e93788b69b702863377e8cddbd66bfba5faad66
1fd439898259b9917becf389\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e93788b69b702863377e8cddbd66bfba5faad661fd439898259b9917becf389\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-02T10:53:36Z\\\",\\\"message\\\":\\\"ed as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:c94130be-172c-477c-88c4-40cc7eba30fe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {eb8eef51-1a8d-43f9-ae2e-3b2cc00ded60}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1002 10:53:36.417880 6343 model_client.go:382] Update operations generated as: [{Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1002 10:53:36.417897 6343 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1002 10:53:36.417918 6343 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:dce28c51-c9f1-478b-97c8-7e209d6e7cbe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1002 10:53:36.417978 6343 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:35Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-qmd84_openshift-ovn-kubernetes(095cdcdf-1ea0-40da-871a-1223c6737377)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qmd84\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:58Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.052801 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aef16d25-482a-4a0f-91e0-b67cdb92c4ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108796d4fc46b7aa41272bcdc4d1ce172e8cf057bde9f21171ff373264b986cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ppwc7
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50f25f7edd0387d10c6125f930f5e278511723be9a92210788ad55b6ec6e956d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ppwc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2q7k8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:58Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.065306 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3288cc82-59a8-408e-8b0e-b5255882b4fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d07b963cb479fc49309f30652bcc2566fd49cd3c7fa53fc79aaf82a9a4b27eab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65d5af548f6048ec95699a1b7175ec8efea96441c335805d704b414eedac00d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2j8rt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:58Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.073926 4783 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-vxpvq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0781b010-f65f-4e49-9d78-48eda11666fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://feae8da7cb436d7753a9fce729106db7025bd28da1a5d4fb2e67ac07120daac8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbk2x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:12Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vxpvq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:58Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.090760 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9d7cf9d-8522-4a62-aaf3-1579864e50f3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b2e414bb5345d3aae7e32d912acf6d7ede59c1f47e1474caf4736663c733d2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8d3ec78a66bfaf15222ed50fe81f042cbf0b9aa5ecc8fb175b0efc8bc1bccbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a03f92fd7d8f5e7193682a5f1db78e947bea4135976b5db6d87842634f0c4d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edada94eb053469810a747a5d1bf8fe2c2da09
5e5c944dd2cd5ab7846faf5ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebdc8ed0cab8049a9f6220e6069291fd7a0e6c2b97e0442e3221549cbcc889f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:58Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.106262 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2054147b-696c-471a-9602-db747325cae2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d954f0dc3572c180556ccde9a4c429359575e5418fb786fc5df6987f8c326075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e521d7072220c202109a67fe59ebebe
0db8bd877d02d9805324922eb3bf45d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb33b77ecaf413bc6e4aeb0f4358b12cd0f3fa6e01602673a518f5651e4bac6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://414652a9b63f678601226313a42b29f5e146de06f854c1f63ed8d408ea9f3553\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-02T10:53:07Z\\\",\\\"message\\\":\\\"file observer\\\\nW1002 10:53:07.776642 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1002 10:53:07.776950 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1002 10:53:07.778492 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-420939118/tls.crt::/tmp/serving-cert-420939118/tls.key\\\\\\\"\\\\nI1002 10:53:07.918100 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1002 10:53:07.922528 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1002 10:53:07.922551 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1002 10:53:07.922578 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1002 10:53:07.922585 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1002 10:53:07.930960 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1002 10:53:07.930984 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1002 10:53:07.930999 1 
secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931026 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931031 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1002 10:53:07.931036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1002 10:53:07.931039 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1002 10:53:07.931042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1002 10:53:07.933737 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://85188b6d7b5901c957d4ea576d5baab74589a2300f15d6210915fd78f6171348\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:58Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.107061 4783 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.107080 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.107088 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.107102 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.107114 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:58Z","lastTransitionTime":"2025-10-02T10:53:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.121925 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"acf018d3-a71b-450d-88ef-8f991d5d2219\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf7913b5261e05e8bbfb22a81635ff274a8a28d550d004a5349a9ed84ba5c8b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcc4f647d50346cff07c267ebe645576d032b01e443eaabe4b9aa73b91966e4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"s
tate\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdd2ca18244e601967c7e10b1f8f1f02fcf33bfee712d78f7715909c95581b9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0e111ce373594ac0f4bf89b06fd2008eb6554e566575b88e4a4f6beaaa29d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:58Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.136125 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fd4a14eb803afc1b1efecd7c9b8560546554be16de62e8db79752bce61a47dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50292888ad1f8cb9b64dcecfa47015e88b7a560123e50d4a3bf1550b87cfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:58Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.149009 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wmn4g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6c8d5bc-163f-401f-bdc5-4625112dced9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:57Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66ec2f190b5004026dfc36e595ca483d08c1dc07796317d28f9dfd252e391fe7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://66ec2f190b5004026dfc36e595ca483d08c1dc07796317d28f9dfd252e391fe7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-02T10:53:57Z\\\",\\\"message\\\":\\\"2025-10-02T10:53:12+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_99ebc822-96d6-4c29-af00-ccf9687e5669\\\\n2025-10-02T10:53:12+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_99ebc822-96d6-4c29-af00-ccf9687e5669 to /host/opt/cni/bin/\\\\n2025-10-02T10:53:12Z [verbose] multus-daemon started\\\\n2025-10-02T10:53:12Z [verbose] Readiness Indicator file check\\\\n2025-10-02T10:53:57Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6gs6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wmn4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:58Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.157695 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ks7tf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cbc7f6d-232e-484d-9afc-7111e428762c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160ecc09cabb40cb5867a449f2bb1707da2367f5b10cc4f53276b372b2641bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fj8jd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ks7tf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:58Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.166495 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"39fe3cc2-3136-44ab-8039-15da972eedad\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c3a7b7bb194e7cc66b1765fc3dc2f4d9c6aafa562cb9fcde19f862fda5e34282\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5dc6c37e90ca8574d11db2bcc1137a63af565614eccef178d05b863c6a839b07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://082040f0d8c15812040aa0d45d22f327c303d526e72d35524470348c34b4e74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1921ea86697ee9de43cd3e9b3b6673ad627b9997cd9f4738e8bd86428e5ef02b\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1921ea86697ee9de43cd3e9b3b6673ad627b9997cd9f4738e8bd86428e5ef02b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:58Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.176859 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:58Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.188403 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:58Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.200111 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dec06359fbdd507aff2ffef8bd1114892cd9e4732b0f21139fd60b5765bc074\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:58Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.208798 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:58 
crc kubenswrapper[4783]: I1002 10:53:58.208846 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.208877 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.208896 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.208906 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:58Z","lastTransitionTime":"2025-10-02T10:53:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.214145 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a3fab4917d2aaaba6208c2c5c4dd19b845e8d52e5e350566d8a78cfd581604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:58Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.228060 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jqvp2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"94681624-a0a9-443a-9b4d-715182399740\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b4ea5dd6b4b12458d54c54f155ff89b39e168c234f9e96f04375a0ed60ef09e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jqvp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:58Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.311887 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.311979 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:58 crc 
kubenswrapper[4783]: I1002 10:53:58.311992 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.312007 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.312017 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:58Z","lastTransitionTime":"2025-10-02T10:53:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.414680 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.414718 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.414726 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.414740 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.414751 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:58Z","lastTransitionTime":"2025-10-02T10:53:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.516719 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.516747 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.516755 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.516766 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.516775 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:58Z","lastTransitionTime":"2025-10-02T10:53:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.544555 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6qbg4"
Oct 02 10:53:58 crc kubenswrapper[4783]: E1002 10:53:58.544668 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6qbg4" podUID="f05f5bf0-b0a7-453b-999b-8ef23ca6cc68"
Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.544972 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 02 10:53:58 crc kubenswrapper[4783]: E1002 10:53:58.545042 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.545485 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 02 10:53:58 crc kubenswrapper[4783]: E1002 10:53:58.545554 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.545601 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 02 10:53:58 crc kubenswrapper[4783]: E1002 10:53:58.545652 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.553326 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"]
Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.619277 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.619316 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.619327 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.619344 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.619354 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:58Z","lastTransitionTime":"2025-10-02T10:53:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.722576 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.722620 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.722635 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.722654 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.722668 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:58Z","lastTransitionTime":"2025-10-02T10:53:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.825529 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.825568 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.825579 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.825596 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.825609 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:58Z","lastTransitionTime":"2025-10-02T10:53:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.927746 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.927798 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.927808 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.927823 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:58 crc kubenswrapper[4783]: I1002 10:53:58.927836 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:58Z","lastTransitionTime":"2025-10-02T10:53:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.004089 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-wmn4g_f6c8d5bc-163f-401f-bdc5-4625112dced9/kube-multus/0.log" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.004152 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-wmn4g" event={"ID":"f6c8d5bc-163f-401f-bdc5-4625112dced9","Type":"ContainerStarted","Data":"b082bad7fea22dc168e5afb29973bf16e160c19b67918df47835b6ffa8ce7d1b"} Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.018864 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"39fe3cc2-3136-44ab-8039-15da972eedad\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c3a7b7bb194e7cc66b1765fc3dc2f4d9c6aafa562cb9fcde19f862fda5e34282\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5dc6c37e90ca8574d11db2bcc1137a63af565614eccef178d05b863c6a839b07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://082040f0d8c15812040aa0d45d22f327c303d526e72d35524470348c34b4e74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-relea
se-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1921ea86697ee9de43cd3e9b3b6673ad627b9997cd9f4738e8bd86428e5ef02b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1921ea86697ee9de43cd3e9b3b6673ad627b9997cd9f4738e8bd86428e5ef02b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:59Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.030817 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.030861 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.030873 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.030892 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.030906 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:59Z","lastTransitionTime":"2025-10-02T10:53:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.031923 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:59Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.046220 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:59Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.056776 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dec06359fbdd507aff2ffef8bd1114892cd9e4732b0f21139fd60b5765bc074\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:59Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.067267 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wmn4g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6c8d5bc-163f-401f-bdc5-4625112dced9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b082bad7fea22dc168e5afb29973bf16e160c19b67918df47835b6ffa8ce7d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://66ec2f190b5004026dfc36e595ca483d08c1dc07796317d28f9dfd252e391fe7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-02T10:53:57Z\\\",\\\"message\\\":\\\"2025-10-02T10:53:12+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_99ebc822-96d6-4c29-af00-ccf9687e5669\\\\n2025-10-02T10:53:12+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_99ebc822-96d6-4c29-af00-ccf9687e5669 to /host/opt/cni/bin/\\\\n2025-10-02T10:53:12Z [verbose] multus-daemon started\\\\n2025-10-02T10:53:12Z [verbose] Readiness Indicator file check\\\\n2025-10-02T10:53:57Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6gs6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wmn4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:59Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.075164 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ks7tf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cbc7f6d-232e-484d-9afc-7111e428762c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160ecc09cabb40cb5867a449f2bb1707da2367f5b10cc4f53276b372b2641bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fj8jd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ks7tf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:59Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.085479 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a3fab4917d2aaaba6208c2c5c4dd19b845e8d52e5e350566d8a78cfd581604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:59Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.097231 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jqvp2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"94681624-a0a9-443a-9b4d-715182399740\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b4ea5dd6b4b12458d54c54f155ff89b39e168c234f9e96f04375a0ed60ef09e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jqvp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:59Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.109120 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"02a1442d-9aca-4931-b631-da78a187f511\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://592d6115666eceba7b6853e7f053042c8e55df0085f67fe3193ed56d79e16cff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://09b174921a65fa7e1f4a51913ab90adbdc791ee1d14b4f498486612f0a66e79d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://09b174921a65fa7e1f4a51913ab90adbdc791ee1d14b4f498486612f0a66e79d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:59Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.121055 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:59Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.133112 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.133147 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.133160 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.133176 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.133187 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:59Z","lastTransitionTime":"2025-10-02T10:53:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.136440 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"095cdcdf-1ea0-40da-871a-1223c6737377\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9e93788b69b702863377e8cddbd66bfba5faad661fd439898259b9917becf389\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e93788b69b702863377e8cddbd66bfba5faad661fd439898259b9917becf389\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-02T10:53:36Z\\\",\\\"message\\\":\\\"ed as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:c94130be-172c-477c-88c4-40cc7eba30fe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {eb8eef51-1a8d-43f9-ae2e-3b2cc00ded60}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1002 10:53:36.417880 6343 model_client.go:382] Update operations generated as: [{Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1002 10:53:36.417897 6343 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1002 10:53:36.417918 6343 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:dce28c51-c9f1-478b-97c8-7e209d6e7cbe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1002 10:53:36.417978 6343 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:35Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-qmd84_openshift-ovn-kubernetes(095cdcdf-1ea0-40da-871a-1223c6737377)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recurs
iveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qmd84\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:59Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.146683 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aef16d25-482a-4a0f-91e0-b67cdb92c4ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108796d4fc46b7aa41272bcdc4d1ce172e8cf057bde9f21171ff373264b986cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ppwc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50f25f7edd0387d10c6125f930f5e278511723be9a92210788ad55b6ec6e956d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ppwc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2q7k8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:59Z is after 2025-08-24T17:21:41Z" Oct 02 
10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.156893 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6qbg4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f05f5bf0-b0a7-453b-999b-8ef23ca6cc68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-scffz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-scffz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:24Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6qbg4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:59Z is after 2025-08-24T17:21:41Z"
Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.173270 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9d7cf9d-8522-4a62-aaf3-1579864e50f3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b2e414bb5345d3aae7e32d912acf6d7ede59c1f47e1474caf4736663c733d2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8d3ec78a66bfaf15222ed50fe81f042cbf0b9aa5ecc8fb175b0efc8bc1bccbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a03f92fd7d8f5e7193682a5f1db78e947bea4135976b5db6d87842634f0c4d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edada94eb053469810a747a5d1bf8fe2c2da09
5e5c944dd2cd5ab7846faf5ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebdc8ed0cab8049a9f6220e6069291fd7a0e6c2b97e0442e3221549cbcc889f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:59Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.188305 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2054147b-696c-471a-9602-db747325cae2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d954f0dc3572c180556ccde9a4c429359575e5418fb786fc5df6987f8c326075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e521d7072220c202109a67fe59ebebe
0db8bd877d02d9805324922eb3bf45d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb33b77ecaf413bc6e4aeb0f4358b12cd0f3fa6e01602673a518f5651e4bac6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://414652a9b63f678601226313a42b29f5e146de06f854c1f63ed8d408ea9f3553\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-02T10:53:07Z\\\",\\\"message\\\":\\\"file observer\\\\nW1002 10:53:07.776642 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1002 10:53:07.776950 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1002 10:53:07.778492 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-420939118/tls.crt::/tmp/serving-cert-420939118/tls.key\\\\\\\"\\\\nI1002 10:53:07.918100 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1002 10:53:07.922528 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1002 10:53:07.922551 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1002 10:53:07.922578 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1002 10:53:07.922585 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1002 10:53:07.930960 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1002 10:53:07.930984 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1002 10:53:07.930999 1 
secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931026 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931031 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1002 10:53:07.931036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1002 10:53:07.931039 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1002 10:53:07.931042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1002 10:53:07.933737 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://85188b6d7b5901c957d4ea576d5baab74589a2300f15d6210915fd78f6171348\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:59Z is after 2025-08-24T17:21:41Z"
Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.198651 4783 status_manager.go:875]
"Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"acf018d3-a71b-450d-88ef-8f991d5d2219\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf7913b5261e05e8bbfb22a81635ff274a8a28d550d004a5349a9ed84ba5c8b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcc4f647d50346cff07c267ebe645576d032b01e443eaabe4b9aa73b91966e4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdd2ca18244e601967c7e10b1f8f1f02fcf33bfee712d78f7715909c95581b9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0e111ce373594a
c0f4bf89b06fd2008eb6554e566575b88e4a4f6beaaa29d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:59Z is after 2025-08-24T17:21:41Z"
Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.209138 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fd4a14eb803afc1b1efecd7c9b8560546554be16de62e8db79752bce61a47dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50292888ad1f8cb9b64dcecfa47015e88b7a560123e50d4a3bf1550b87cfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453
265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:59Z is after 2025-08-24T17:21:41Z"
Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.220200 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3288cc82-59a8-408e-8b0e-b5255882b4fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d07b963cb479fc49309f30652bcc2566fd49cd3c7fa53fc79aaf82a9a4b27eab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65d5af548f6048ec95699a1b7175ec8efea96441c335805d704b414eedac00d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba
96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2j8rt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:59Z is after 2025-08-24T17:21:41Z"
Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.229613 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vxpvq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0781b010-f65f-4e49-9d78-48eda11666fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://feae8da7cb436d7753a9fce729106db7025bd28da1a5d4fb2e67ac07120daac8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbk2x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running
\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:12Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vxpvq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:53:59Z is after 2025-08-24T17:21:41Z" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.235341 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.235371 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.235382 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.235396 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.235405 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:59Z","lastTransitionTime":"2025-10-02T10:53:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.338252 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.338282 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.338290 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.338303 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.338316 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:59Z","lastTransitionTime":"2025-10-02T10:53:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.441192 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.441225 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.441236 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.441251 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.441262 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:59Z","lastTransitionTime":"2025-10-02T10:53:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.543693 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.543934 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.544036 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.544128 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.544187 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:59Z","lastTransitionTime":"2025-10-02T10:53:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.646728 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.647033 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.647119 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.647212 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.647303 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:59Z","lastTransitionTime":"2025-10-02T10:53:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.749110 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.749148 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.749158 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.749175 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.749186 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:59Z","lastTransitionTime":"2025-10-02T10:53:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.852011 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.852309 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.852447 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.852556 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.852642 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:59Z","lastTransitionTime":"2025-10-02T10:53:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.955118 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.955748 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.955874 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.955963 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:53:59 crc kubenswrapper[4783]: I1002 10:53:59.956053 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:53:59Z","lastTransitionTime":"2025-10-02T10:53:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.058862 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.059282 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.059514 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.059693 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.059829 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:00Z","lastTransitionTime":"2025-10-02T10:54:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.162609 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.162921 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.163024 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.163122 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.163207 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:00Z","lastTransitionTime":"2025-10-02T10:54:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.265936 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.266372 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.266626 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.266803 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.266936 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:00Z","lastTransitionTime":"2025-10-02T10:54:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.369557 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.369595 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.369606 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.369622 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.369633 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:00Z","lastTransitionTime":"2025-10-02T10:54:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.472247 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.472290 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.472306 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.472326 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.472343 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:00Z","lastTransitionTime":"2025-10-02T10:54:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.544024 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.544116 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6qbg4" Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.544184 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.544047 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 02 10:54:00 crc kubenswrapper[4783]: E1002 10:54:00.544223 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 02 10:54:00 crc kubenswrapper[4783]: E1002 10:54:00.544336 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 02 10:54:00 crc kubenswrapper[4783]: E1002 10:54:00.544476 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 02 10:54:00 crc kubenswrapper[4783]: E1002 10:54:00.544568 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6qbg4" podUID="f05f5bf0-b0a7-453b-999b-8ef23ca6cc68" Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.574267 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.574294 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.574303 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.574316 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.574325 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:00Z","lastTransitionTime":"2025-10-02T10:54:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.676488 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.676519 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.676527 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.676538 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.676547 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:00Z","lastTransitionTime":"2025-10-02T10:54:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.779179 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.779247 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.779270 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.779298 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.779316 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:00Z","lastTransitionTime":"2025-10-02T10:54:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.882750 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.882789 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.882799 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.882813 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.882823 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:00Z","lastTransitionTime":"2025-10-02T10:54:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.985018 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.985051 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.985060 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.985071 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:00 crc kubenswrapper[4783]: I1002 10:54:00.985080 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:00Z","lastTransitionTime":"2025-10-02T10:54:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:01 crc kubenswrapper[4783]: I1002 10:54:01.087760 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:01 crc kubenswrapper[4783]: I1002 10:54:01.087793 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:01 crc kubenswrapper[4783]: I1002 10:54:01.087804 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:01 crc kubenswrapper[4783]: I1002 10:54:01.087823 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:01 crc kubenswrapper[4783]: I1002 10:54:01.087836 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:01Z","lastTransitionTime":"2025-10-02T10:54:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:01 crc kubenswrapper[4783]: I1002 10:54:01.189802 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:01 crc kubenswrapper[4783]: I1002 10:54:01.189824 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:01 crc kubenswrapper[4783]: I1002 10:54:01.189833 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:01 crc kubenswrapper[4783]: I1002 10:54:01.189846 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:01 crc kubenswrapper[4783]: I1002 10:54:01.189854 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:01Z","lastTransitionTime":"2025-10-02T10:54:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:01 crc kubenswrapper[4783]: I1002 10:54:01.292712 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:01 crc kubenswrapper[4783]: I1002 10:54:01.292745 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:01 crc kubenswrapper[4783]: I1002 10:54:01.292756 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:01 crc kubenswrapper[4783]: I1002 10:54:01.292770 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:01 crc kubenswrapper[4783]: I1002 10:54:01.292784 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:01Z","lastTransitionTime":"2025-10-02T10:54:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:01 crc kubenswrapper[4783]: I1002 10:54:01.395706 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:01 crc kubenswrapper[4783]: I1002 10:54:01.395746 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:01 crc kubenswrapper[4783]: I1002 10:54:01.395757 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:01 crc kubenswrapper[4783]: I1002 10:54:01.395772 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:01 crc kubenswrapper[4783]: I1002 10:54:01.395784 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:01Z","lastTransitionTime":"2025-10-02T10:54:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:01 crc kubenswrapper[4783]: I1002 10:54:01.497868 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:01 crc kubenswrapper[4783]: I1002 10:54:01.497930 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:01 crc kubenswrapper[4783]: I1002 10:54:01.497943 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:01 crc kubenswrapper[4783]: I1002 10:54:01.497960 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:01 crc kubenswrapper[4783]: I1002 10:54:01.497972 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:01Z","lastTransitionTime":"2025-10-02T10:54:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:01 crc kubenswrapper[4783]: I1002 10:54:01.601979 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:01 crc kubenswrapper[4783]: I1002 10:54:01.602065 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:01 crc kubenswrapper[4783]: I1002 10:54:01.602076 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:01 crc kubenswrapper[4783]: I1002 10:54:01.602095 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:01 crc kubenswrapper[4783]: I1002 10:54:01.602113 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:01Z","lastTransitionTime":"2025-10-02T10:54:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:01 crc kubenswrapper[4783]: I1002 10:54:01.705639 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:01 crc kubenswrapper[4783]: I1002 10:54:01.705697 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:01 crc kubenswrapper[4783]: I1002 10:54:01.705715 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:01 crc kubenswrapper[4783]: I1002 10:54:01.705739 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:01 crc kubenswrapper[4783]: I1002 10:54:01.705756 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:01Z","lastTransitionTime":"2025-10-02T10:54:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:01 crc kubenswrapper[4783]: I1002 10:54:01.808857 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:01 crc kubenswrapper[4783]: I1002 10:54:01.808920 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:01 crc kubenswrapper[4783]: I1002 10:54:01.808939 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:01 crc kubenswrapper[4783]: I1002 10:54:01.808963 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:01 crc kubenswrapper[4783]: I1002 10:54:01.808981 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:01Z","lastTransitionTime":"2025-10-02T10:54:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:01 crc kubenswrapper[4783]: I1002 10:54:01.912241 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:01 crc kubenswrapper[4783]: I1002 10:54:01.912309 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:01 crc kubenswrapper[4783]: I1002 10:54:01.912322 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:01 crc kubenswrapper[4783]: I1002 10:54:01.912340 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:01 crc kubenswrapper[4783]: I1002 10:54:01.912352 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:01Z","lastTransitionTime":"2025-10-02T10:54:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.015540 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.015608 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.015627 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.015654 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.015672 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:02Z","lastTransitionTime":"2025-10-02T10:54:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.118602 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.118639 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.118650 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.118664 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.118675 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:02Z","lastTransitionTime":"2025-10-02T10:54:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.221298 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.221352 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.221370 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.221393 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.221450 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:02Z","lastTransitionTime":"2025-10-02T10:54:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.324499 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.324612 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.324694 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.324719 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.324750 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:02Z","lastTransitionTime":"2025-10-02T10:54:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.427081 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.427130 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.427145 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.427166 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.427188 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:02Z","lastTransitionTime":"2025-10-02T10:54:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.529787 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.529836 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.529850 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.529867 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.529878 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:02Z","lastTransitionTime":"2025-10-02T10:54:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.544315 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.544355 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.544402 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6qbg4" Oct 02 10:54:02 crc kubenswrapper[4783]: E1002 10:54:02.544516 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.544561 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 02 10:54:02 crc kubenswrapper[4783]: E1002 10:54:02.544707 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6qbg4" podUID="f05f5bf0-b0a7-453b-999b-8ef23ca6cc68" Oct 02 10:54:02 crc kubenswrapper[4783]: E1002 10:54:02.544758 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 02 10:54:02 crc kubenswrapper[4783]: E1002 10:54:02.544857 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.631946 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.632008 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.632025 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.632048 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.632066 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:02Z","lastTransitionTime":"2025-10-02T10:54:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.735684 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.735746 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.735762 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.735784 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.735801 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:02Z","lastTransitionTime":"2025-10-02T10:54:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.839407 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.839839 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.839977 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.840114 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.840242 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:02Z","lastTransitionTime":"2025-10-02T10:54:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.943091 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.943689 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.943885 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.944043 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:02 crc kubenswrapper[4783]: I1002 10:54:02.944179 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:02Z","lastTransitionTime":"2025-10-02T10:54:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:03 crc kubenswrapper[4783]: I1002 10:54:03.046167 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:03 crc kubenswrapper[4783]: I1002 10:54:03.046199 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:03 crc kubenswrapper[4783]: I1002 10:54:03.046225 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:03 crc kubenswrapper[4783]: I1002 10:54:03.046239 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:03 crc kubenswrapper[4783]: I1002 10:54:03.046248 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:03Z","lastTransitionTime":"2025-10-02T10:54:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:03 crc kubenswrapper[4783]: I1002 10:54:03.148991 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:03 crc kubenswrapper[4783]: I1002 10:54:03.149045 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:03 crc kubenswrapper[4783]: I1002 10:54:03.149075 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:03 crc kubenswrapper[4783]: I1002 10:54:03.149095 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:03 crc kubenswrapper[4783]: I1002 10:54:03.149110 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:03Z","lastTransitionTime":"2025-10-02T10:54:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:03 crc kubenswrapper[4783]: I1002 10:54:03.252576 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:03 crc kubenswrapper[4783]: I1002 10:54:03.252642 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:03 crc kubenswrapper[4783]: I1002 10:54:03.252660 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:03 crc kubenswrapper[4783]: I1002 10:54:03.252683 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:03 crc kubenswrapper[4783]: I1002 10:54:03.252701 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:03Z","lastTransitionTime":"2025-10-02T10:54:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:03 crc kubenswrapper[4783]: I1002 10:54:03.355293 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:03 crc kubenswrapper[4783]: I1002 10:54:03.355339 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:03 crc kubenswrapper[4783]: I1002 10:54:03.355356 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:03 crc kubenswrapper[4783]: I1002 10:54:03.355378 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:03 crc kubenswrapper[4783]: I1002 10:54:03.355395 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:03Z","lastTransitionTime":"2025-10-02T10:54:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:03 crc kubenswrapper[4783]: I1002 10:54:03.458510 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:03 crc kubenswrapper[4783]: I1002 10:54:03.458579 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:03 crc kubenswrapper[4783]: I1002 10:54:03.458604 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:03 crc kubenswrapper[4783]: I1002 10:54:03.458635 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:03 crc kubenswrapper[4783]: I1002 10:54:03.458653 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:03Z","lastTransitionTime":"2025-10-02T10:54:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:03 crc kubenswrapper[4783]: I1002 10:54:03.545697 4783 scope.go:117] "RemoveContainer" containerID="9e93788b69b702863377e8cddbd66bfba5faad661fd439898259b9917becf389" Oct 02 10:54:03 crc kubenswrapper[4783]: I1002 10:54:03.561833 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:03 crc kubenswrapper[4783]: I1002 10:54:03.561896 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:03 crc kubenswrapper[4783]: I1002 10:54:03.561912 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:03 crc kubenswrapper[4783]: I1002 10:54:03.561937 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:03 crc kubenswrapper[4783]: I1002 10:54:03.561954 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:03Z","lastTransitionTime":"2025-10-02T10:54:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:03 crc kubenswrapper[4783]: I1002 10:54:03.664587 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:03 crc kubenswrapper[4783]: I1002 10:54:03.664651 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:03 crc kubenswrapper[4783]: I1002 10:54:03.664667 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:03 crc kubenswrapper[4783]: I1002 10:54:03.664694 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:03 crc kubenswrapper[4783]: I1002 10:54:03.664716 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:03Z","lastTransitionTime":"2025-10-02T10:54:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:03 crc kubenswrapper[4783]: I1002 10:54:03.768334 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:03 crc kubenswrapper[4783]: I1002 10:54:03.768396 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:03 crc kubenswrapper[4783]: I1002 10:54:03.768437 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:03 crc kubenswrapper[4783]: I1002 10:54:03.768465 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:03 crc kubenswrapper[4783]: I1002 10:54:03.768481 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:03Z","lastTransitionTime":"2025-10-02T10:54:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:03 crc kubenswrapper[4783]: I1002 10:54:03.871812 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:03 crc kubenswrapper[4783]: I1002 10:54:03.871924 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:03 crc kubenswrapper[4783]: I1002 10:54:03.871944 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:03 crc kubenswrapper[4783]: I1002 10:54:03.872017 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:03 crc kubenswrapper[4783]: I1002 10:54:03.872037 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:03Z","lastTransitionTime":"2025-10-02T10:54:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:03 crc kubenswrapper[4783]: I1002 10:54:03.983671 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:03 crc kubenswrapper[4783]: I1002 10:54:03.983734 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:03 crc kubenswrapper[4783]: I1002 10:54:03.983752 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:03 crc kubenswrapper[4783]: I1002 10:54:03.983777 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:03 crc kubenswrapper[4783]: I1002 10:54:03.983796 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:03Z","lastTransitionTime":"2025-10-02T10:54:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.022576 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qmd84_095cdcdf-1ea0-40da-871a-1223c6737377/ovnkube-controller/2.log" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.025657 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" event={"ID":"095cdcdf-1ea0-40da-871a-1223c6737377","Type":"ContainerStarted","Data":"28a2dfa47c9574c9dbe9d258a082c63f6430fc57d7bf234a4453042d2026460a"} Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.026891 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.055466 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a3fab4917d2aaaba6208c2c5c4dd19b845e8d52e5e350566d8a78cfd581604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:04Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.076940 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jqvp2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"94681624-a0a9-443a-9b4d-715182399740\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b4ea5dd6b4b12458d54c54f155ff89b39e168c234f9e96f04375a0ed60ef09e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jqvp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:04Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.086198 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.086237 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:04 crc 
kubenswrapper[4783]: I1002 10:54:04.086268 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.086286 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.086297 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:04Z","lastTransitionTime":"2025-10-02T10:54:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.090793 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02a1442d-9aca-4931-b631-da78a187f511\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://592d6115666eceba7b6853e7f053042c8e55df0085f67fe3193ed56d79e16cff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://09b174921a65fa7e1f4a51913ab90adbdc791ee1d14b4f498486612f0a66e79d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://09b174921a65fa7e1f4a51913ab90adbdc791ee1d14b4f498486612f0a66e79d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\
\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:04Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.108157 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:04Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.127679 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"095cdcdf-1ea0-40da-871a-1223c6737377\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28a2dfa47c9574c9dbe9d258a082c63f6430fc57
d7bf234a4453042d2026460a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e93788b69b702863377e8cddbd66bfba5faad661fd439898259b9917becf389\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-02T10:53:36Z\\\",\\\"message\\\":\\\"ed as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:c94130be-172c-477c-88c4-40cc7eba30fe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {eb8eef51-1a8d-43f9-ae2e-3b2cc00ded60}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1002 10:53:36.417880 6343 model_client.go:382] Update operations generated as: [{Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1002 10:53:36.417897 6343 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1002 10:53:36.417918 6343 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:dce28c51-c9f1-478b-97c8-7e209d6e7cbe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1002 10:53:36.417978 6343 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:35Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:54:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\
"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qmd84\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:04Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.138649 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aef16d25-482a-4a0f-91e0-b67cdb92c4ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108796d4fc46b7aa41272bcdc4d1ce172e8cf057bde9f21171ff373264b986cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ppwc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50f25f7edd0387d10c6125f930f5e278511723be9a92210788ad55b6ec6e956d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ppwc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2q7k8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:04Z is after 2025-08-24T17:21:41Z" Oct 02 
10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.148582 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6qbg4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f05f5bf0-b0a7-453b-999b-8ef23ca6cc68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-scffz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-scffz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:24Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6qbg4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:04Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.165848 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9d7cf9d-8522-4a62-aaf3-1579864e50f3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b2e414bb5345d3aae7e32d912acf6d7ede59c1f47e1474caf4736663c733d2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8d3ec78a66bfaf15222ed50fe81f042cbf0b9aa5ecc8fb175b0efc8bc1bccbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a03f92fd7d8f5e7193682a5f1db78e947bea4135976b5db6d87842634f0c4d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edada94eb053469810a747a5d1bf8fe2c2da09
5e5c944dd2cd5ab7846faf5ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebdc8ed0cab8049a9f6220e6069291fd7a0e6c2b97e0442e3221549cbcc889f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:04Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.177049 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2054147b-696c-471a-9602-db747325cae2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d954f0dc3572c180556ccde9a4c429359575e5418fb786fc5df6987f8c326075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e521d7072220c202109a67fe59ebebe
0db8bd877d02d9805324922eb3bf45d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb33b77ecaf413bc6e4aeb0f4358b12cd0f3fa6e01602673a518f5651e4bac6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://414652a9b63f678601226313a42b29f5e146de06f854c1f63ed8d408ea9f3553\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-02T10:53:07Z\\\",\\\"message\\\":\\\"file observer\\\\nW1002 10:53:07.776642 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1002 10:53:07.776950 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1002 10:53:07.778492 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-420939118/tls.crt::/tmp/serving-cert-420939118/tls.key\\\\\\\"\\\\nI1002 10:53:07.918100 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1002 10:53:07.922528 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1002 10:53:07.922551 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1002 10:53:07.922578 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1002 10:53:07.922585 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1002 10:53:07.930960 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1002 10:53:07.930984 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1002 10:53:07.930999 1 
secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931026 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931031 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1002 10:53:07.931036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1002 10:53:07.931039 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1002 10:53:07.931042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1002 10:53:07.933737 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://85188b6d7b5901c957d4ea576d5baab74589a2300f15d6210915fd78f6171348\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:04Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.186888 4783 status_manager.go:875] 
"Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"acf018d3-a71b-450d-88ef-8f991d5d2219\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf7913b5261e05e8bbfb22a81635ff274a8a28d550d004a5349a9ed84ba5c8b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcc4f647d50346cff07c267ebe645576d032b01e443eaabe4b9aa73b91966e4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdd2ca18244e601967c7e10b1f8f1f02fcf33bfee712d78f7715909c95581b9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0e111ce373594a
c0f4bf89b06fd2008eb6554e566575b88e4a4f6beaaa29d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:04Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.188487 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.188527 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.188539 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.188556 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.188567 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:04Z","lastTransitionTime":"2025-10-02T10:54:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.200488 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fd4a14eb803afc1b1efecd7c9b8560546554be16de62e8db79752bce61a47dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50292888ad1f8cb9b64dcecfa47015e88b7a560123e50d4a3bf1550b87cfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:04Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.211005 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3288cc82-59a8-408e-8b0e-b5255882b4fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d07b963cb479fc49309f30652bcc2566fd49cd3c7fa53fc79aaf82a9a4b27eab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65d5af548f6048ec95699a1b7175ec8efea96441c335805d704b414eedac00d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2j8rt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:04Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.221954 4783 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-vxpvq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0781b010-f65f-4e49-9d78-48eda11666fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://feae8da7cb436d7753a9fce729106db7025bd28da1a5d4fb2e67ac07120daac8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbk2x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:12Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vxpvq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:04Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.233082 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"39fe3cc2-3136-44ab-8039-15da972eedad\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c3a7b7bb194e7cc66b1765fc3dc2f4d9c6aafa562cb9fcde19f862fda5e34282\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5dc6c37e90ca8574d11db2bcc1137a63af565614eccef178d05b863c6a839b07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://082040f0d8c15812040aa0d45d22f327c303d526e72d35524470348c34b4e74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1921ea86697ee9de43cd3e9b3b6673ad627b9997cd9f4738e8bd86428e5ef02b\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1921ea86697ee9de43cd3e9b3b6673ad627b9997cd9f4738e8bd86428e5ef02b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:04Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.245973 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:04Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.257380 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:04Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.269321 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dec06359fbdd507aff2ffef8bd1114892cd9e4732b0f21139fd60b5765bc074\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:04Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.280510 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wmn4g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6c8d5bc-163f-401f-bdc5-4625112dced9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b082bad7fea22dc168e5afb29973bf16e160c19b67918df47835b6ffa8ce7d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://66ec2f190b5004026dfc36e595ca483d08c1dc07796317d28f9dfd252e391fe7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-02T10:53:57Z\\\",\\\"message\\\":\\\"2025-10-02T10:53:12+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_99ebc822-96d6-4c29-af00-ccf9687e5669\\\\n2025-10-02T10:53:12+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_99ebc822-96d6-4c29-af00-ccf9687e5669 to /host/opt/cni/bin/\\\\n2025-10-02T10:53:12Z [verbose] multus-daemon started\\\\n2025-10-02T10:53:12Z [verbose] Readiness Indicator file check\\\\n2025-10-02T10:53:57Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6gs6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wmn4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:04Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.289648 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ks7tf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cbc7f6d-232e-484d-9afc-7111e428762c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160ecc09cabb40cb5867a449f2bb1707da2367f5b10cc4f53276b372b2641bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fj8jd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ks7tf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:04Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.291144 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.291171 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.291193 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.291209 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.291218 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:04Z","lastTransitionTime":"2025-10-02T10:54:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.393823 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.393866 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.393875 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.393893 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.393903 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:04Z","lastTransitionTime":"2025-10-02T10:54:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.496720 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.496745 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.496754 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.496766 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.496777 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:04Z","lastTransitionTime":"2025-10-02T10:54:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.544065 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 02 10:54:04 crc kubenswrapper[4783]: E1002 10:54:04.544236 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.544224 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.544678 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:54:04 crc kubenswrapper[4783]: E1002 10:54:04.544684 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.544750 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6qbg4" Oct 02 10:54:04 crc kubenswrapper[4783]: E1002 10:54:04.544785 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 02 10:54:04 crc kubenswrapper[4783]: E1002 10:54:04.544906 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6qbg4" podUID="f05f5bf0-b0a7-453b-999b-8ef23ca6cc68" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.599989 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.600065 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.600089 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.600118 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.600140 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:04Z","lastTransitionTime":"2025-10-02T10:54:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.703351 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.703395 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.703444 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.703467 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.703483 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:04Z","lastTransitionTime":"2025-10-02T10:54:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.806337 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.806382 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.806397 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.806442 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.806459 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:04Z","lastTransitionTime":"2025-10-02T10:54:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.909242 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.909284 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.909293 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.909306 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:04 crc kubenswrapper[4783]: I1002 10:54:04.909317 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:04Z","lastTransitionTime":"2025-10-02T10:54:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.012208 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.012284 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.012311 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.012349 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.012377 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:05Z","lastTransitionTime":"2025-10-02T10:54:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.034661 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qmd84_095cdcdf-1ea0-40da-871a-1223c6737377/ovnkube-controller/3.log" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.035680 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qmd84_095cdcdf-1ea0-40da-871a-1223c6737377/ovnkube-controller/2.log" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.041787 4783 generic.go:334] "Generic (PLEG): container finished" podID="095cdcdf-1ea0-40da-871a-1223c6737377" containerID="28a2dfa47c9574c9dbe9d258a082c63f6430fc57d7bf234a4453042d2026460a" exitCode=1 Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.041861 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" event={"ID":"095cdcdf-1ea0-40da-871a-1223c6737377","Type":"ContainerDied","Data":"28a2dfa47c9574c9dbe9d258a082c63f6430fc57d7bf234a4453042d2026460a"} Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.041914 4783 scope.go:117] "RemoveContainer" containerID="9e93788b69b702863377e8cddbd66bfba5faad661fd439898259b9917becf389" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.044313 4783 scope.go:117] "RemoveContainer" containerID="28a2dfa47c9574c9dbe9d258a082c63f6430fc57d7bf234a4453042d2026460a" Oct 02 10:54:05 crc kubenswrapper[4783]: E1002 10:54:05.045517 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-qmd84_openshift-ovn-kubernetes(095cdcdf-1ea0-40da-871a-1223c6737377)\"" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.065692 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:05Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.093280 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"095cdcdf-1ea0-40da-871a-1223c6737377\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28a2dfa47c9574c9dbe9d258a082c63f6430fc57
d7bf234a4453042d2026460a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9e93788b69b702863377e8cddbd66bfba5faad661fd439898259b9917becf389\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-02T10:53:36Z\\\",\\\"message\\\":\\\"ed as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:c94130be-172c-477c-88c4-40cc7eba30fe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {eb8eef51-1a8d-43f9-ae2e-3b2cc00ded60}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1002 10:53:36.417880 6343 model_client.go:382] Update operations generated as: [{Op:update Table:NAT Row:map[external_ip:192.168.126.11 logical_ip:10.217.0.59 options:{GoMap:map[stateless:false]} type:snat] Rows:[] Columns:[] Mutations:[] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {dce28c51-c9f1-478b-97c8-7e209d6e7cbe}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1002 10:53:36.417897 6343 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nI1002 10:53:36.417918 6343 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Logical_Router Row:map[] Rows:[] Columns:[] Mutations:[{Column:nat Mutator:insert Value:{GoSet:[{GoUUID:dce28c51-c9f1-478b-97c8-7e209d6e7cbe}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {e3c4661a-36a6-47f0-a6c0-a4ee741f2224}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nF1002 10:53:36.417978 6343 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:35Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28a2dfa47c9574c9dbe9d258a082c63f6430fc57d7bf234a4453042d2026460a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-02T10:54:04Z\\\",\\\"message\\\":\\\"] Retry object setup: *v1.Pod openshift-multus/multus-additional-cni-plugins-jqvp2\\\\nI1002 10:54:04.476900 6682 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/network-metrics-daemon-6qbg4\\\\nI1002 10:54:04.476770 6682 obj_retry.go:418] Waiting for all the *v1.Pod retry setup to complete in iterateRetryResources\\\\nI1002 10:54:04.476876 6682 ovn.go:134] Ensuring zone local for Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8 in node crc\\\\nI1002 10:54:04.476919 6682 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/network-metrics-daemon-6qbg4\\\\nI1002 10:54:04.476924 6682 obj_retry.go:386] Retry successful for *v1.Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8 after 0 failed attempt(s)\\\\nF1002 10:54:04.476929 6682 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during 
admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:54:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursive
ReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qmd84\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:05Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.111116 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aef16d25-482a-4a0f-91e0-b67cdb92c4ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108796d4fc46b7aa41272bcdc4d1ce172e8cf057bde9f21171ff373264b986cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ppwc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50f25f7edd0387d10c6125f930f5e278511723be9a92210788ad55b6ec6e956d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ppwc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2q7k8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:05Z is after 2025-08-24T17:21:41Z" Oct 02 
10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.115588 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.115663 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.115689 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.115721 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.115747 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:05Z","lastTransitionTime":"2025-10-02T10:54:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.130476 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6qbg4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f05f5bf0-b0a7-453b-999b-8ef23ca6cc68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-scffz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-scffz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:24Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6qbg4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:05Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.143110 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"02a1442d-9aca-4931-b631-da78a187f511\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://592d6115666eceba7b6853e7f053042c8e55df0085f67fe3193ed56d79e16cff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://09b174921a65fa7e1f4a51913ab90adbdc791ee1d14b4f498486612f0a66e79d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://09b174921a65fa7e1f4a51913ab90adbdc791ee1d14b4f498486612f0a66e79d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:05Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.156930 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2054147b-696c-471a-9602-db747325cae2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d954f0dc3572c180556ccde9a4c429359575e5418fb786fc5df6987f8c326075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e521d7072220c202109a67fe59ebebe0db8bd877d02d9805324922eb3bf45d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb33b77ecaf413bc6e4aeb0f4358b12cd0f3fa6e01602673a518f5651e4bac6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://414652a9b63f678601226313a42b29f5e146de06f854c1f63ed8d408ea9f3553\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-02T10:53:07Z\\\",\\\"message\\\":\\\"file observer\\\\nW1002 10:53:07.776642 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1002 10:53:07.776950 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1002 10:53:07.778492 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-420939118/tls.crt::/tmp/serving-cert-420939118/tls.key\\\\\\\"\\\\nI1002 10:53:07.918100 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1002 10:53:07.922528 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1002 10:53:07.922551 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1002 10:53:07.922578 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1002 10:53:07.922585 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1002 10:53:07.930960 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1002 10:53:07.930984 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1002 10:53:07.930999 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931026 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931031 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1002 10:53:07.931036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1002 10:53:07.931039 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1002 10:53:07.931042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1002 10:53:07.933737 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://85188b6d7b5901c957d4ea576d5baab74589a2300f15d6210915fd78f6171348\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:05Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.173457 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"acf018d3-a71b-450d-88ef-8f991d5d2219\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf7913b5261e05e8bbfb22a81635ff274a8a28d550d004a5349a9ed84ba5c8b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcc4f647d50346cff07c267ebe645576d032b01e443eaabe4b9aa73b91966e4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdd2ca18244e601967c7e10b1f8f1f02fcf33bfee712d78f7715909c95581b9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0e111ce373594ac0f4bf89b06fd2008eb6554e566575b88e4a4f6beaaa29d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:05Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.190585 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fd4a14eb803afc1b1efecd7c9b8560546554be16de62e8db79752bce61a47dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50292888ad1f8cb9b64dcecfa47015e88b7a560123e50d4a3bf1550b87cfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:05Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.204664 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3288cc82-59a8-408e-8b0e-b5255882b4fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d07b963cb479fc49309f30652bcc2566fd49cd3c7fa53fc79aaf82a9a4b27eab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65d5af548f6048ec95699a1b7175ec8efea96441c335805d704b414eedac00d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha
256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2j8rt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:05Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.217332 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vxpvq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0781b010-f65f-4e49-9d78-48eda11666fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://feae8da7cb436d7753a9fce729106db7025bd28da1a5d4fb2e67ac07120daac8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbk2x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10
:53:12Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vxpvq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:05Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.218454 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.218503 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.218519 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.218544 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.218559 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:05Z","lastTransitionTime":"2025-10-02T10:54:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.236629 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9d7cf9d-8522-4a62-aaf3-1579864e50f3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b2e414bb5345d3aae7e32d912acf6d7ede59c1f47e1474caf4736663c733d2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etc
d/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8d3ec78a66bfaf15222ed50fe81f042cbf0b9aa5ecc8fb175b0efc8bc1bccbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a03f92fd7d8f5e7193682a5f1db78e947bea4135976b5db6d87842634f0c4d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edada94eb053469810a747a5d1bf8fe2c2da095e5c944dd2cd5ab7846faf5ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebdc8ed0cab8049a9f6220e6069291fd7a0e6c2b97e0442e3221549cbcc889f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\
\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:05Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.250101 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:05Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.263762 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:05Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.277364 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dec06359fbdd507aff2ffef8bd1114892cd9e4732b0f21139fd60b5765bc074\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:05Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.291510 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wmn4g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6c8d5bc-163f-401f-bdc5-4625112dced9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b082bad7fea22dc168e5afb29973bf16e160c19b67918df47835b6ffa8ce7d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://66ec2f190b5004026dfc36e595ca483d08c1dc07796317d28f9dfd252e391fe7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-02T10:53:57Z\\\",\\\"message\\\":\\\"2025-10-02T10:53:12+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_99ebc822-96d6-4c29-af00-ccf9687e5669\\\\n2025-10-02T10:53:12+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_99ebc822-96d6-4c29-af00-ccf9687e5669 to /host/opt/cni/bin/\\\\n2025-10-02T10:53:12Z [verbose] multus-daemon started\\\\n2025-10-02T10:53:12Z [verbose] Readiness Indicator file check\\\\n2025-10-02T10:53:57Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6gs6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wmn4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:05Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.303001 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ks7tf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cbc7f6d-232e-484d-9afc-7111e428762c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160ecc09cabb40cb5867a449f2bb1707da2367f5b10cc4f53276b372b2641bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fj8jd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ks7tf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:05Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.316503 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"39fe3cc2-3136-44ab-8039-15da972eedad\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c3a7b7bb194e7cc66b1765fc3dc2f4d9c6aafa562cb9fcde19f862fda5e34282\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5dc6c37e90ca8574d11db2bcc1137a63af565614eccef178d05b863c6a839b07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://082040f0d8c15812040aa0d45d22f327c303d526e72d35524470348c34b4e74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1921ea86697ee9de43cd3e9b3b6673ad627b9997cd9f4738e8bd86428e5ef02b\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1921ea86697ee9de43cd3e9b3b6673ad627b9997cd9f4738e8bd86428e5ef02b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:05Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.320246 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.320289 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.320300 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.320317 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.320332 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:05Z","lastTransitionTime":"2025-10-02T10:54:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.333871 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jqvp2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94681624-a0a9-443a-9b4d-715182399740\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b4ea5dd6b4b12458d54c54f155ff89b39e168c234f9e96f04375a0ed60ef09e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jqvp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:05Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.350815 4783 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a3fab4917d2aaaba6208c2c5c4dd19b845e8d52e5e350566d8a78cfd581604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:05Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.423654 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.423709 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.423727 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.423750 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.423769 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:05Z","lastTransitionTime":"2025-10-02T10:54:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.527089 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.527159 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.527178 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.527202 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.527220 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:05Z","lastTransitionTime":"2025-10-02T10:54:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.630934 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.631001 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.631022 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.631048 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.631071 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:05Z","lastTransitionTime":"2025-10-02T10:54:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.680487 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.680564 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.680586 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.680618 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.680641 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:05Z","lastTransitionTime":"2025-10-02T10:54:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:05 crc kubenswrapper[4783]: E1002 10:54:05.702915 4783 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:05Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:05Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc187a47-fc71-4069-a609-1fd638044aa7\\\",\\\"systemUUID\\\":\\\"b9763463-2b4a-4924-bf4e-8df5af678b9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:05Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.708661 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.708736 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.708760 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.708789 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.708810 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:05Z","lastTransitionTime":"2025-10-02T10:54:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:05 crc kubenswrapper[4783]: E1002 10:54:05.731021 4783 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:05Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:05Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc187a47-fc71-4069-a609-1fd638044aa7\\\",\\\"systemUUID\\\":\\\"b9763463-2b4a-4924-bf4e-8df5af678b9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:05Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.737552 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.737621 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.737644 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.737674 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.737699 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:05Z","lastTransitionTime":"2025-10-02T10:54:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:05 crc kubenswrapper[4783]: E1002 10:54:05.760266 4783 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:05Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:05Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc187a47-fc71-4069-a609-1fd638044aa7\\\",\\\"systemUUID\\\":\\\"b9763463-2b4a-4924-bf4e-8df5af678b9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:05Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.765371 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.765479 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.765508 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.765544 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.765569 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:05Z","lastTransitionTime":"2025-10-02T10:54:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:05 crc kubenswrapper[4783]: E1002 10:54:05.784562 4783 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:05Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:05Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc187a47-fc71-4069-a609-1fd638044aa7\\\",\\\"systemUUID\\\":\\\"b9763463-2b4a-4924-bf4e-8df5af678b9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:05Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.789726 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.789757 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.789771 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.789792 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.789807 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:05Z","lastTransitionTime":"2025-10-02T10:54:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:05 crc kubenswrapper[4783]: E1002 10:54:05.807226 4783 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:05Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:05Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:05Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:05Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc187a47-fc71-4069-a609-1fd638044aa7\\\",\\\"systemUUID\\\":\\\"b9763463-2b4a-4924-bf4e-8df5af678b9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:05Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:05 crc kubenswrapper[4783]: E1002 10:54:05.807450 4783 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.809676 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.809746 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.809766 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.809789 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.809808 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:05Z","lastTransitionTime":"2025-10-02T10:54:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.912521 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.912577 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.912594 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.912618 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:05 crc kubenswrapper[4783]: I1002 10:54:05.912638 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:05Z","lastTransitionTime":"2025-10-02T10:54:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.015334 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.015369 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.015380 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.015396 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.015406 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:06Z","lastTransitionTime":"2025-10-02T10:54:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.047928 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qmd84_095cdcdf-1ea0-40da-871a-1223c6737377/ovnkube-controller/3.log" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.053695 4783 scope.go:117] "RemoveContainer" containerID="28a2dfa47c9574c9dbe9d258a082c63f6430fc57d7bf234a4453042d2026460a" Oct 02 10:54:06 crc kubenswrapper[4783]: E1002 10:54:06.054009 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-qmd84_openshift-ovn-kubernetes(095cdcdf-1ea0-40da-871a-1223c6737377)\"" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.091317 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"095cdcdf-1ea0-40da-871a-1223c6737377\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28a2dfa47c9574c9dbe9d258a082c63f6430fc57
d7bf234a4453042d2026460a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28a2dfa47c9574c9dbe9d258a082c63f6430fc57d7bf234a4453042d2026460a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-02T10:54:04Z\\\",\\\"message\\\":\\\"] Retry object setup: *v1.Pod openshift-multus/multus-additional-cni-plugins-jqvp2\\\\nI1002 10:54:04.476900 6682 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/network-metrics-daemon-6qbg4\\\\nI1002 10:54:04.476770 6682 obj_retry.go:418] Waiting for all the *v1.Pod retry setup to complete in iterateRetryResources\\\\nI1002 10:54:04.476876 6682 ovn.go:134] Ensuring zone local for Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8 in node crc\\\\nI1002 10:54:04.476919 6682 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/network-metrics-daemon-6qbg4\\\\nI1002 10:54:04.476924 6682 obj_retry.go:386] Retry successful for *v1.Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8 after 0 failed attempt(s)\\\\nF1002 10:54:04.476929 6682 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:54:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-qmd84_openshift-ovn-kubernetes(095cdcdf-1ea0-40da-871a-1223c6737377)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qmd84\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:06Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.108165 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aef16d25-482a-4a0f-91e0-b67cdb92c4ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108796d4fc46b7aa41272bcdc4d1ce172e8cf057bde9f21171ff373264b986cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ppwc7
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50f25f7edd0387d10c6125f930f5e278511723be9a92210788ad55b6ec6e956d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ppwc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2q7k8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:06Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.119263 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.119306 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.119315 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.119333 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.119343 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:06Z","lastTransitionTime":"2025-10-02T10:54:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.123382 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6qbg4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f05f5bf0-b0a7-453b-999b-8ef23ca6cc68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-scffz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-scffz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:24Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6qbg4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:06Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.139152 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"02a1442d-9aca-4931-b631-da78a187f511\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://592d6115666eceba7b6853e7f053042c8e55df0085f67fe3193ed56d79e16cff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://09b174921a65fa7e1f4a51913ab90adbdc791ee1d14b4f498486612f0a66e79d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://09b174921a65fa7e1f4a51913ab90adbdc791ee1d14b4f498486612f0a66e79d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:06Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.160826 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:06Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.186258 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"acf018d3-a71b-450d-88ef-8f991d5d2219\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf7913b5261e05e8bbfb22a81635ff274a8a28d550d004a5349a9ed84ba5c8b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcc4f647d50346cff07c267ebe645576d032b01e443eaabe4b9aa73b91966e4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdd2ca18244e601967c7e10b1f8f1f02fcf33bfee712d78f7715909c95581b9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0e111ce373594ac0f4bf89b06fd2008eb6554e566575b88e4a4f6beaaa29d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:06Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.206877 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fd4a14eb803afc1b1efecd7c9b8560546554be16de62e8db79752bce61a47dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50292888ad1f8cb9b64dcecfa47015e88b7a560123e50d4a3bf1550b87cfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:06Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.222664 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.222724 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.222741 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.222766 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.222784 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:06Z","lastTransitionTime":"2025-10-02T10:54:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.226477 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3288cc82-59a8-408e-8b0e-b5255882b4fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d07b963cb479fc49309f30652bcc2566fd49cd3c7fa53fc79aaf82a9a4b27eab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65d5af548f6048ec95699a1b7175ec8efea96441c335805d704b414eedac00d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2j8rt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:06Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.242155 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vxpvq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0781b010-f65f-4e49-9d78-48eda11666fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://feae8da7cb436d7753a9fce729106db7025bd28da1a5d4fb2e67ac07120daac8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbk2x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:12Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vxpvq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:06Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.267596 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9d7cf9d-8522-4a62-aaf3-1579864e50f3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b2e414bb5345d3aae7e32d912acf6d7ede59c1f47e1474caf4736663c733d2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8d3ec78a66bfaf15222ed50fe81f042cbf0b9aa5ecc8fb175b0efc8bc1bccbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a03f92fd7d8f5e7193682a5f1db78e947bea4135976b5db6d87842634f0c4d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edada94eb053469810a747a5d1bf8fe2c2da09
5e5c944dd2cd5ab7846faf5ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebdc8ed0cab8049a9f6220e6069291fd7a0e6c2b97e0442e3221549cbcc889f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:06Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.285877 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2054147b-696c-471a-9602-db747325cae2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d954f0dc3572c180556ccde9a4c429359575e5418fb786fc5df6987f8c326075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e521d7072220c202109a67fe59ebebe
0db8bd877d02d9805324922eb3bf45d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb33b77ecaf413bc6e4aeb0f4358b12cd0f3fa6e01602673a518f5651e4bac6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://414652a9b63f678601226313a42b29f5e146de06f854c1f63ed8d408ea9f3553\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-02T10:53:07Z\\\",\\\"message\\\":\\\"file observer\\\\nW1002 10:53:07.776642 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1002 10:53:07.776950 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1002 10:53:07.778492 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-420939118/tls.crt::/tmp/serving-cert-420939118/tls.key\\\\\\\"\\\\nI1002 10:53:07.918100 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1002 10:53:07.922528 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1002 10:53:07.922551 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1002 10:53:07.922578 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1002 10:53:07.922585 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1002 10:53:07.930960 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1002 10:53:07.930984 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1002 10:53:07.930999 1 
secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931026 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931031 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1002 10:53:07.931036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1002 10:53:07.931039 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1002 10:53:07.931042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1002 10:53:07.933737 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://85188b6d7b5901c957d4ea576d5baab74589a2300f15d6210915fd78f6171348\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:06Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.301377 4783 status_manager.go:875] 
"Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:06Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.315983 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dec06359fbdd507aff2ffef8bd1114892cd9e4732b0f21139fd60b5765bc074\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:06Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.325561 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.325644 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.325668 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.325699 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.325722 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:06Z","lastTransitionTime":"2025-10-02T10:54:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.330889 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wmn4g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6c8d5bc-163f-401f-bdc5-4625112dced9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b082bad7fea22dc168e5afb29973bf16e160c19b67918df47835b6ffa8ce7d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://66ec2f190b5004026dfc36e595ca483d08c1dc07796317d28f9dfd252e391fe7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-02T10:53:57Z\\\",\\\"message\\\":\\\"2025-10-02T10:53:12+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_99ebc822-96d6-4c29-af00-ccf9687e5669\\\\n2025-10-02T10:53:12+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_99ebc822-96d6-4c29-af00-ccf9687e5669 to /host/opt/cni/bin/\\\\n2025-10-02T10:53:12Z [verbose] multus-daemon started\\\\n2025-10-02T10:53:12Z [verbose] Readiness Indicator file check\\\\n2025-10-02T10:53:57Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6gs6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wmn4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:06Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.345682 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ks7tf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cbc7f6d-232e-484d-9afc-7111e428762c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160ecc09cabb40cb5867a449f2bb1707da2367f5b10cc4f53276b372b2641bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fj8jd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ks7tf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:06Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.363630 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"39fe3cc2-3136-44ab-8039-15da972eedad\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c3a7b7bb194e7cc66b1765fc3dc2f4d9c6aafa562cb9fcde19f862fda5e34282\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5dc6c37e90ca8574d11db2bcc1137a63af565614eccef178d05b863c6a839b07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://082040f0d8c15812040aa0d45d22f327c303d526e72d35524470348c34b4e74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1921ea86697ee9de43cd3e9b3b6673ad627b9997cd9f4738e8bd86428e5ef02b\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1921ea86697ee9de43cd3e9b3b6673ad627b9997cd9f4738e8bd86428e5ef02b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:06Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.384304 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:06Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.406182 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a3fab4917d2aaaba6208c2c5c4dd19b845e8d52e5e350566d8a78cfd581604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:06Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.428444 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.428495 4783 kubelet_node_status.go:724] "Recording 
event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.428506 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.428523 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.428535 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:06Z","lastTransitionTime":"2025-10-02T10:54:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.429610 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jqvp2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94681624-a0a9-443a-9b4d-715182399740\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b4ea5dd6b4b12458d54c54f155ff89b39e168c234f9e96f04375a0ed60ef09e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"t
erminated\\\":{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://
2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPat
h\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jqvp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:06Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.531771 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.531814 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.531825 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.531842 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.531854 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:06Z","lastTransitionTime":"2025-10-02T10:54:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.544384 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6qbg4" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.544446 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.544482 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 02 10:54:06 crc kubenswrapper[4783]: E1002 10:54:06.544511 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6qbg4" podUID="f05f5bf0-b0a7-453b-999b-8ef23ca6cc68" Oct 02 10:54:06 crc kubenswrapper[4783]: E1002 10:54:06.544618 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.544764 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:54:06 crc kubenswrapper[4783]: E1002 10:54:06.544785 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 02 10:54:06 crc kubenswrapper[4783]: E1002 10:54:06.544850 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.634763 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.634815 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.634829 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.634847 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.634863 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:06Z","lastTransitionTime":"2025-10-02T10:54:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.738188 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.738262 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.738286 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.738317 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.738338 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:06Z","lastTransitionTime":"2025-10-02T10:54:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.841814 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.841877 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.841894 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.841919 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.841936 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:06Z","lastTransitionTime":"2025-10-02T10:54:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.945106 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.945179 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.945203 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.945234 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:06 crc kubenswrapper[4783]: I1002 10:54:06.945257 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:06Z","lastTransitionTime":"2025-10-02T10:54:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.049053 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.049122 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.049138 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.049164 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.049182 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:07Z","lastTransitionTime":"2025-10-02T10:54:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.151659 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.151732 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.151749 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.151776 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.151797 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:07Z","lastTransitionTime":"2025-10-02T10:54:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.254952 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.255051 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.255069 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.255091 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.255112 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:07Z","lastTransitionTime":"2025-10-02T10:54:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.359380 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.360294 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.360641 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.360856 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.361016 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:07Z","lastTransitionTime":"2025-10-02T10:54:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.464504 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.464573 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.464652 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.464743 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.464766 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:07Z","lastTransitionTime":"2025-10-02T10:54:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.567326 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.567368 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.567377 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.567392 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.567402 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:07Z","lastTransitionTime":"2025-10-02T10:54:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.569131 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:07Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.604645 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"095cdcdf-1ea0-40da-871a-1223c6737377\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28a2dfa47c9574c9dbe9d258a082c63f6430fc57d7bf234a4453042d2026460a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28a2dfa47c9574c9dbe9d258a082c63f6430fc57d7bf234a4453042d2026460a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-02T10:54:04Z\\\",\\\"message\\\":\\\"] Retry object setup: *v1.Pod openshift-multus/multus-additional-cni-plugins-jqvp2\\\\nI1002 10:54:04.476900 6682 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/network-metrics-daemon-6qbg4\\\\nI1002 10:54:04.476770 6682 obj_retry.go:418] Waiting for all the *v1.Pod retry setup to complete in iterateRetryResources\\\\nI1002 10:54:04.476876 6682 ovn.go:134] Ensuring zone local for Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8 in node crc\\\\nI1002 10:54:04.476919 6682 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/network-metrics-daemon-6qbg4\\\\nI1002 10:54:04.476924 6682 obj_retry.go:386] Retry successful for *v1.Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8 after 0 failed attempt(s)\\\\nF1002 10:54:04.476929 6682 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:54:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-qmd84_openshift-ovn-kubernetes(095cdcdf-1ea0-40da-871a-1223c6737377)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qmd84\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:07Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.624100 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aef16d25-482a-4a0f-91e0-b67cdb92c4ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108796d4fc46b7aa41272bcdc4d1ce172e8cf057bde9f21171ff373264b986cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ppwc7
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50f25f7edd0387d10c6125f930f5e278511723be9a92210788ad55b6ec6e956d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ppwc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2q7k8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:07Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.637061 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6qbg4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f05f5bf0-b0a7-453b-999b-8ef23ca6cc68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-scffz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-scffz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:24Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6qbg4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:07Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.651900 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"02a1442d-9aca-4931-b631-da78a187f511\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://592d6115666eceba7b6853e7f053042c8e55df0085f67fe3193ed56d79e16cff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://09b174921a65fa7e1f4a51913ab90adbdc791ee1d14b4f498486612f0a66e79d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://09b174921a65fa7e1f4a51913ab90adbdc791ee1d14b4f498486612f0a66e79d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:07Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.669243 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.669298 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.669316 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.669339 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.669356 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:07Z","lastTransitionTime":"2025-10-02T10:54:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.680727 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2054147b-696c-471a-9602-db747325cae2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d954f0dc3572c180556ccde9a4c429359575e5418fb786fc5df6987f8c326075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e521d7072220c202109a67fe59ebebe0db8bd877d02d9805324922eb3bf45d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-po
d-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb33b77ecaf413bc6e4aeb0f4358b12cd0f3fa6e01602673a518f5651e4bac6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://414652a9b63f678601226313a42b29f5e146de06f854c1f63ed8d408ea9f3553\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-02T10:53:07Z\\\",\\\"message\\\":\\\"file observer\\\\nW1002 10:53:07.776642 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1002 10:53:07.776950 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1002 10:53:07.778492 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-420939118/tls.crt::/tmp/serving-cert-420939118/tls.key\\\\\\\"\\\\nI1002 10:53:07.918100 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1002 10:53:07.922528 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1002 10:53:07.922551 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1002 10:53:07.922578 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1002 10:53:07.922585 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1002 10:53:07.930960 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1002 10:53:07.930984 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1002 10:53:07.930999 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931026 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931031 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1002 10:53:07.931036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1002 10:53:07.931039 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1002 10:53:07.931042 1 secure_serving.go:69] Use of insecure cipher 
'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1002 10:53:07.933737 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://85188b6d7b5901c957d4ea576d5baab74589a2300f15d6210915fd78f6171348\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:07Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.704590 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"acf018d3-a71b-450d-88ef-8f991d5d2219\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf7913b5261e05e8bbfb22a81635ff274a8a28d550d004a5349a9ed84ba5c8b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcc4f647d50346cff07c267ebe645576d032b01e443eaabe4b9aa73b91966e4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdd2ca18244e601967c7e10b1f8f1f02fcf33bfee712d78f7715909c95581b9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0e111ce373594ac0f4bf89b06fd2008eb6554e566575b88e4a4f6beaaa29d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:07Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.718710 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fd4a14eb803afc1b1efecd7c9b8560546554be16de62e8db79752bce61a47dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50292888ad1f8cb9b64dcecfa47015e88b7a560123e50d4a3bf1550b87cfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:07Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.731800 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3288cc82-59a8-408e-8b0e-b5255882b4fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d07b963cb479fc49309f30652bcc2566fd49cd3c7fa53fc79aaf82a9a4b27eab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65d5af548f6048ec95699a1b7175ec8efea96441c335805d704b414eedac00d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha
256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2j8rt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:07Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.742244 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vxpvq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0781b010-f65f-4e49-9d78-48eda11666fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://feae8da7cb436d7753a9fce729106db7025bd28da1a5d4fb2e67ac07120daac8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbk2x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10
:53:12Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vxpvq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:07Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.763886 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9d7cf9d-8522-4a62-aaf3-1579864e50f3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b2e414bb5345d3aae7e32d912acf6d7ede59c1f47e1474caf4736663c733d2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8d3ec78a66bfaf15222ed50fe81f042cbf0b9aa5ecc8fb175b0efc8bc1bccbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a03f92fd7d8f5e7193682a5f1db78e947bea4135976b5db6d87842634f0c4d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/o
penshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edada94eb053469810a747a5d1bf8fe2c2da095e5c944dd2cd5ab7846faf5ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebdc8ed0cab8049a9f6220e6069291fd7a0e6c2b97e0442e3221549cbcc889f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441
ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:07Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.773892 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.773924 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.773933 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.773947 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.773957 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:07Z","lastTransitionTime":"2025-10-02T10:54:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.777007 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:07Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.789789 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:07Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.801644 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dec06359fbdd507aff2ffef8bd1114892cd9e4732b0f21139fd60b5765bc074\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:07Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.815882 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wmn4g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6c8d5bc-163f-401f-bdc5-4625112dced9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b082bad7fea22dc168e5afb29973bf16e160c19b67918df47835b6ffa8ce7d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://66ec2f190b5004026dfc36e595ca483d08c1dc07796317d28f9dfd252e391fe7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-02T10:53:57Z\\\",\\\"message\\\":\\\"2025-10-02T10:53:12+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_99ebc822-96d6-4c29-af00-ccf9687e5669\\\\n2025-10-02T10:53:12+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_99ebc822-96d6-4c29-af00-ccf9687e5669 to /host/opt/cni/bin/\\\\n2025-10-02T10:53:12Z [verbose] multus-daemon started\\\\n2025-10-02T10:53:12Z [verbose] Readiness Indicator file check\\\\n2025-10-02T10:53:57Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6gs6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wmn4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:07Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.827060 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ks7tf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cbc7f6d-232e-484d-9afc-7111e428762c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160ecc09cabb40cb5867a449f2bb1707da2367f5b10cc4f53276b372b2641bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fj8jd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ks7tf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:07Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.839635 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"39fe3cc2-3136-44ab-8039-15da972eedad\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c3a7b7bb194e7cc66b1765fc3dc2f4d9c6aafa562cb9fcde19f862fda5e34282\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5dc6c37e90ca8574d11db2bcc1137a63af565614eccef178d05b863c6a839b07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://082040f0d8c15812040aa0d45d22f327c303d526e72d35524470348c34b4e74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1921ea86697ee9de43cd3e9b3b6673ad627b9997cd9f4738e8bd86428e5ef02b\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1921ea86697ee9de43cd3e9b3b6673ad627b9997cd9f4738e8bd86428e5ef02b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:07Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.858301 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jqvp2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94681624-a0a9-443a-9b4d-715182399740\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b4ea5dd6b4b12458d54c54f155ff89b39e168c234f9e96f04375a0ed60ef09e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f
8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\
\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\"
,\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jqvp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:07Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.872106 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a3fab4917d2aaaba6208c2c5c4dd19b845e8d52e5e350566d8a78cfd581604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:07Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.875876 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.875898 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.875907 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.875919 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.875929 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:07Z","lastTransitionTime":"2025-10-02T10:54:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.979143 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.979205 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.979221 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.979241 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:07 crc kubenswrapper[4783]: I1002 10:54:07.979257 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:07Z","lastTransitionTime":"2025-10-02T10:54:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:08 crc kubenswrapper[4783]: I1002 10:54:08.082400 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:08 crc kubenswrapper[4783]: I1002 10:54:08.082484 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:08 crc kubenswrapper[4783]: I1002 10:54:08.082501 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:08 crc kubenswrapper[4783]: I1002 10:54:08.082524 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:08 crc kubenswrapper[4783]: I1002 10:54:08.082542 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:08Z","lastTransitionTime":"2025-10-02T10:54:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:08 crc kubenswrapper[4783]: I1002 10:54:08.185724 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:08 crc kubenswrapper[4783]: I1002 10:54:08.185792 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:08 crc kubenswrapper[4783]: I1002 10:54:08.185809 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:08 crc kubenswrapper[4783]: I1002 10:54:08.185832 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:08 crc kubenswrapper[4783]: I1002 10:54:08.185848 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:08Z","lastTransitionTime":"2025-10-02T10:54:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:08 crc kubenswrapper[4783]: I1002 10:54:08.289343 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:08 crc kubenswrapper[4783]: I1002 10:54:08.289753 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:08 crc kubenswrapper[4783]: I1002 10:54:08.289770 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:08 crc kubenswrapper[4783]: I1002 10:54:08.289795 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:08 crc kubenswrapper[4783]: I1002 10:54:08.289812 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:08Z","lastTransitionTime":"2025-10-02T10:54:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:08 crc kubenswrapper[4783]: I1002 10:54:08.392751 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:08 crc kubenswrapper[4783]: I1002 10:54:08.392809 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:08 crc kubenswrapper[4783]: I1002 10:54:08.392826 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:08 crc kubenswrapper[4783]: I1002 10:54:08.392851 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:08 crc kubenswrapper[4783]: I1002 10:54:08.392869 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:08Z","lastTransitionTime":"2025-10-02T10:54:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:08 crc kubenswrapper[4783]: I1002 10:54:08.497635 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:08 crc kubenswrapper[4783]: I1002 10:54:08.497991 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:08 crc kubenswrapper[4783]: I1002 10:54:08.498185 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:08 crc kubenswrapper[4783]: I1002 10:54:08.498336 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:08 crc kubenswrapper[4783]: I1002 10:54:08.498511 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:08Z","lastTransitionTime":"2025-10-02T10:54:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:08 crc kubenswrapper[4783]: I1002 10:54:08.543943 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:54:08 crc kubenswrapper[4783]: I1002 10:54:08.543976 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6qbg4" Oct 02 10:54:08 crc kubenswrapper[4783]: I1002 10:54:08.543988 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 02 10:54:08 crc kubenswrapper[4783]: E1002 10:54:08.544116 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 02 10:54:08 crc kubenswrapper[4783]: E1002 10:54:08.544249 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 02 10:54:08 crc kubenswrapper[4783]: E1002 10:54:08.544386 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6qbg4" podUID="f05f5bf0-b0a7-453b-999b-8ef23ca6cc68" Oct 02 10:54:08 crc kubenswrapper[4783]: I1002 10:54:08.544809 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 02 10:54:08 crc kubenswrapper[4783]: E1002 10:54:08.545112 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 02 10:54:08 crc kubenswrapper[4783]: I1002 10:54:08.601746 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:08 crc kubenswrapper[4783]: I1002 10:54:08.602138 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:08 crc kubenswrapper[4783]: I1002 10:54:08.602296 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:08 crc kubenswrapper[4783]: I1002 10:54:08.602484 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:08 crc kubenswrapper[4783]: I1002 10:54:08.602642 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:08Z","lastTransitionTime":"2025-10-02T10:54:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:08 crc kubenswrapper[4783]: I1002 10:54:08.706868 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:08 crc kubenswrapper[4783]: I1002 10:54:08.706925 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:08 crc kubenswrapper[4783]: I1002 10:54:08.706946 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:08 crc kubenswrapper[4783]: I1002 10:54:08.706974 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:08 crc kubenswrapper[4783]: I1002 10:54:08.706996 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:08Z","lastTransitionTime":"2025-10-02T10:54:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:08 crc kubenswrapper[4783]: I1002 10:54:08.810270 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:08 crc kubenswrapper[4783]: I1002 10:54:08.810327 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:08 crc kubenswrapper[4783]: I1002 10:54:08.810338 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:08 crc kubenswrapper[4783]: I1002 10:54:08.810352 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:08 crc kubenswrapper[4783]: I1002 10:54:08.810361 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:08Z","lastTransitionTime":"2025-10-02T10:54:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:08 crc kubenswrapper[4783]: I1002 10:54:08.913478 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:08 crc kubenswrapper[4783]: I1002 10:54:08.913724 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:08 crc kubenswrapper[4783]: I1002 10:54:08.913736 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:08 crc kubenswrapper[4783]: I1002 10:54:08.913809 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:08 crc kubenswrapper[4783]: I1002 10:54:08.913823 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:08Z","lastTransitionTime":"2025-10-02T10:54:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:09 crc kubenswrapper[4783]: I1002 10:54:09.016577 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:09 crc kubenswrapper[4783]: I1002 10:54:09.016619 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:09 crc kubenswrapper[4783]: I1002 10:54:09.016630 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:09 crc kubenswrapper[4783]: I1002 10:54:09.016647 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:09 crc kubenswrapper[4783]: I1002 10:54:09.016659 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:09Z","lastTransitionTime":"2025-10-02T10:54:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:09 crc kubenswrapper[4783]: I1002 10:54:09.119759 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:09 crc kubenswrapper[4783]: I1002 10:54:09.119822 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:09 crc kubenswrapper[4783]: I1002 10:54:09.119839 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:09 crc kubenswrapper[4783]: I1002 10:54:09.119863 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:09 crc kubenswrapper[4783]: I1002 10:54:09.119880 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:09Z","lastTransitionTime":"2025-10-02T10:54:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:09 crc kubenswrapper[4783]: I1002 10:54:09.223395 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:09 crc kubenswrapper[4783]: I1002 10:54:09.223490 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:09 crc kubenswrapper[4783]: I1002 10:54:09.223515 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:09 crc kubenswrapper[4783]: I1002 10:54:09.223543 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:09 crc kubenswrapper[4783]: I1002 10:54:09.223564 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:09Z","lastTransitionTime":"2025-10-02T10:54:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:09 crc kubenswrapper[4783]: I1002 10:54:09.327271 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:09 crc kubenswrapper[4783]: I1002 10:54:09.327754 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:09 crc kubenswrapper[4783]: I1002 10:54:09.327937 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:09 crc kubenswrapper[4783]: I1002 10:54:09.328093 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:09 crc kubenswrapper[4783]: I1002 10:54:09.328235 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:09Z","lastTransitionTime":"2025-10-02T10:54:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:09 crc kubenswrapper[4783]: I1002 10:54:09.431357 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:09 crc kubenswrapper[4783]: I1002 10:54:09.431476 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:09 crc kubenswrapper[4783]: I1002 10:54:09.431497 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:09 crc kubenswrapper[4783]: I1002 10:54:09.431519 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:09 crc kubenswrapper[4783]: I1002 10:54:09.431536 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:09Z","lastTransitionTime":"2025-10-02T10:54:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:09 crc kubenswrapper[4783]: I1002 10:54:09.534351 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:09 crc kubenswrapper[4783]: I1002 10:54:09.534468 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:09 crc kubenswrapper[4783]: I1002 10:54:09.534499 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:09 crc kubenswrapper[4783]: I1002 10:54:09.534530 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:09 crc kubenswrapper[4783]: I1002 10:54:09.534554 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:09Z","lastTransitionTime":"2025-10-02T10:54:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:09 crc kubenswrapper[4783]: I1002 10:54:09.637160 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:09 crc kubenswrapper[4783]: I1002 10:54:09.637206 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:09 crc kubenswrapper[4783]: I1002 10:54:09.637217 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:09 crc kubenswrapper[4783]: I1002 10:54:09.637240 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:09 crc kubenswrapper[4783]: I1002 10:54:09.637254 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:09Z","lastTransitionTime":"2025-10-02T10:54:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:09 crc kubenswrapper[4783]: I1002 10:54:09.740126 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:09 crc kubenswrapper[4783]: I1002 10:54:09.740266 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:09 crc kubenswrapper[4783]: I1002 10:54:09.740350 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:09 crc kubenswrapper[4783]: I1002 10:54:09.740513 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:09 crc kubenswrapper[4783]: I1002 10:54:09.740564 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:09Z","lastTransitionTime":"2025-10-02T10:54:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:09 crc kubenswrapper[4783]: I1002 10:54:09.843534 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:09 crc kubenswrapper[4783]: I1002 10:54:09.843659 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:09 crc kubenswrapper[4783]: I1002 10:54:09.843685 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:09 crc kubenswrapper[4783]: I1002 10:54:09.843728 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:09 crc kubenswrapper[4783]: I1002 10:54:09.843763 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:09Z","lastTransitionTime":"2025-10-02T10:54:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:09 crc kubenswrapper[4783]: I1002 10:54:09.946868 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:09 crc kubenswrapper[4783]: I1002 10:54:09.946923 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:09 crc kubenswrapper[4783]: I1002 10:54:09.946939 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:09 crc kubenswrapper[4783]: I1002 10:54:09.946961 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:09 crc kubenswrapper[4783]: I1002 10:54:09.946978 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:09Z","lastTransitionTime":"2025-10-02T10:54:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.050168 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.050220 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.050238 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.050260 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.050279 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:10Z","lastTransitionTime":"2025-10-02T10:54:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.153795 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.153885 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.153903 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.153927 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.153947 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:10Z","lastTransitionTime":"2025-10-02T10:54:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.257951 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.258049 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.258068 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.258092 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.258110 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:10Z","lastTransitionTime":"2025-10-02T10:54:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.361856 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.361946 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.361966 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.361994 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.362012 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:10Z","lastTransitionTime":"2025-10-02T10:54:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.466088 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.466166 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.466185 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.466223 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.466243 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:10Z","lastTransitionTime":"2025-10-02T10:54:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.544517 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.544625 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6qbg4"
Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.544717 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 02 10:54:10 crc kubenswrapper[4783]: E1002 10:54:10.544713 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 02 10:54:10 crc kubenswrapper[4783]: E1002 10:54:10.544861 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6qbg4" podUID="f05f5bf0-b0a7-453b-999b-8ef23ca6cc68"
Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.544908 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 02 10:54:10 crc kubenswrapper[4783]: E1002 10:54:10.545031 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 02 10:54:10 crc kubenswrapper[4783]: E1002 10:54:10.545147 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.569445 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.569509 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.569528 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.569552 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.569569 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:10Z","lastTransitionTime":"2025-10-02T10:54:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.673076 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.673125 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.673141 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.673165 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.673183 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:10Z","lastTransitionTime":"2025-10-02T10:54:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.775913 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.775993 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.776015 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.776041 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.776064 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:10Z","lastTransitionTime":"2025-10-02T10:54:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.878815 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.878866 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.878884 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.878907 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.878926 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:10Z","lastTransitionTime":"2025-10-02T10:54:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.981853 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.981911 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.981929 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.981953 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:10 crc kubenswrapper[4783]: I1002 10:54:10.981972 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:10Z","lastTransitionTime":"2025-10-02T10:54:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:11 crc kubenswrapper[4783]: I1002 10:54:11.085030 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:11 crc kubenswrapper[4783]: I1002 10:54:11.085096 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:11 crc kubenswrapper[4783]: I1002 10:54:11.085113 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:11 crc kubenswrapper[4783]: I1002 10:54:11.085136 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:11 crc kubenswrapper[4783]: I1002 10:54:11.085154 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:11Z","lastTransitionTime":"2025-10-02T10:54:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:11 crc kubenswrapper[4783]: I1002 10:54:11.189192 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:11 crc kubenswrapper[4783]: I1002 10:54:11.189308 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:11 crc kubenswrapper[4783]: I1002 10:54:11.189333 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:11 crc kubenswrapper[4783]: I1002 10:54:11.189362 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:11 crc kubenswrapper[4783]: I1002 10:54:11.189384 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:11Z","lastTransitionTime":"2025-10-02T10:54:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:11 crc kubenswrapper[4783]: I1002 10:54:11.295114 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:11 crc kubenswrapper[4783]: I1002 10:54:11.295157 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:11 crc kubenswrapper[4783]: I1002 10:54:11.295166 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:11 crc kubenswrapper[4783]: I1002 10:54:11.295189 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:11 crc kubenswrapper[4783]: I1002 10:54:11.295202 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:11Z","lastTransitionTime":"2025-10-02T10:54:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:11 crc kubenswrapper[4783]: I1002 10:54:11.398048 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:11 crc kubenswrapper[4783]: I1002 10:54:11.398130 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:11 crc kubenswrapper[4783]: I1002 10:54:11.398147 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:11 crc kubenswrapper[4783]: I1002 10:54:11.398194 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:11 crc kubenswrapper[4783]: I1002 10:54:11.398211 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:11Z","lastTransitionTime":"2025-10-02T10:54:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:11 crc kubenswrapper[4783]: I1002 10:54:11.501357 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:11 crc kubenswrapper[4783]: I1002 10:54:11.501489 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:11 crc kubenswrapper[4783]: I1002 10:54:11.501516 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:11 crc kubenswrapper[4783]: I1002 10:54:11.501973 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:11 crc kubenswrapper[4783]: I1002 10:54:11.502107 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:11Z","lastTransitionTime":"2025-10-02T10:54:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:11 crc kubenswrapper[4783]: I1002 10:54:11.605648 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:11 crc kubenswrapper[4783]: I1002 10:54:11.605999 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:11 crc kubenswrapper[4783]: I1002 10:54:11.606162 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:11 crc kubenswrapper[4783]: I1002 10:54:11.606316 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:11 crc kubenswrapper[4783]: I1002 10:54:11.606471 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:11Z","lastTransitionTime":"2025-10-02T10:54:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:11 crc kubenswrapper[4783]: I1002 10:54:11.709389 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:11 crc kubenswrapper[4783]: I1002 10:54:11.709479 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:11 crc kubenswrapper[4783]: I1002 10:54:11.709498 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:11 crc kubenswrapper[4783]: I1002 10:54:11.709520 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:11 crc kubenswrapper[4783]: I1002 10:54:11.709536 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:11Z","lastTransitionTime":"2025-10-02T10:54:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:11 crc kubenswrapper[4783]: I1002 10:54:11.812100 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:11 crc kubenswrapper[4783]: I1002 10:54:11.812145 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:11 crc kubenswrapper[4783]: I1002 10:54:11.812165 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:11 crc kubenswrapper[4783]: I1002 10:54:11.812187 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:11 crc kubenswrapper[4783]: I1002 10:54:11.812204 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:11Z","lastTransitionTime":"2025-10-02T10:54:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:11 crc kubenswrapper[4783]: I1002 10:54:11.915317 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:11 crc kubenswrapper[4783]: I1002 10:54:11.915370 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:11 crc kubenswrapper[4783]: I1002 10:54:11.915389 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:11 crc kubenswrapper[4783]: I1002 10:54:11.915449 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:11 crc kubenswrapper[4783]: I1002 10:54:11.915467 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:11Z","lastTransitionTime":"2025-10-02T10:54:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.018531 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.018590 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.018609 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.018632 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.018649 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:12Z","lastTransitionTime":"2025-10-02T10:54:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.121324 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.121405 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.121477 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.121518 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.121546 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:12Z","lastTransitionTime":"2025-10-02T10:54:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.224403 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.224453 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.224470 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.224487 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.224497 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:12Z","lastTransitionTime":"2025-10-02T10:54:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.327548 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.327617 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.327642 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.327672 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.327693 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:12Z","lastTransitionTime":"2025-10-02T10:54:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.353376 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.353615 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.353693 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 02 10:54:12 crc kubenswrapper[4783]: E1002 10:54:12.353734 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:16.353695793 +0000 UTC m=+149.669890084 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 02 10:54:12 crc kubenswrapper[4783]: E1002 10:54:12.353818 4783 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Oct 02 10:54:12 crc kubenswrapper[4783]: E1002 10:54:12.353921 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-02 10:55:16.353892308 +0000 UTC m=+149.670086659 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Oct 02 10:54:12 crc kubenswrapper[4783]: E1002 10:54:12.353948 4783 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Oct 02 10:54:12 crc kubenswrapper[4783]: E1002 10:54:12.354078 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-10-02 10:55:16.354038722 +0000 UTC m=+149.670233023 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.430976 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.431053 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.431082 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.431119 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.431145 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:12Z","lastTransitionTime":"2025-10-02T10:54:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.454958 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.455121 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 02 10:54:12 crc kubenswrapper[4783]: E1002 10:54:12.455160 4783 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Oct 02 10:54:12 crc kubenswrapper[4783]: E1002 10:54:12.455197 4783 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Oct 02 10:54:12 crc kubenswrapper[4783]: E1002 10:54:12.455216 4783 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Oct 02 10:54:12 crc kubenswrapper[4783]: E1002 10:54:12.455297 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-10-02 10:55:16.455271256 +0000 UTC m=+149.771465547 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Oct 02 10:54:12 crc kubenswrapper[4783]: E1002 10:54:12.455343 4783 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Oct 02 10:54:12 crc kubenswrapper[4783]: E1002 10:54:12.455379 4783 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Oct 02 10:54:12 crc kubenswrapper[4783]: E1002 10:54:12.455403 4783 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Oct 02 10:54:12 crc kubenswrapper[4783]: E1002 10:54:12.455539 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-10-02 10:55:16.455510322 +0000 UTC m=+149.771704643 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.534115 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.534179 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.534202 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.534230 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.534255 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:12Z","lastTransitionTime":"2025-10-02T10:54:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.544660 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.544740 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6qbg4"
Oct 02 10:54:12 crc kubenswrapper[4783]: E1002 10:54:12.544834 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.544919 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.544678 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 02 10:54:12 crc kubenswrapper[4783]: E1002 10:54:12.545008 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6qbg4" podUID="f05f5bf0-b0a7-453b-999b-8ef23ca6cc68"
Oct 02 10:54:12 crc kubenswrapper[4783]: E1002 10:54:12.545124 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 02 10:54:12 crc kubenswrapper[4783]: E1002 10:54:12.545211 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.637462 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.637534 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.637570 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.637601 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.637622 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:12Z","lastTransitionTime":"2025-10-02T10:54:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.741804 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.741872 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.741898 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.741934 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.741956 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:12Z","lastTransitionTime":"2025-10-02T10:54:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.845574 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.845641 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.845667 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.845697 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.845719 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:12Z","lastTransitionTime":"2025-10-02T10:54:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.949009 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.949069 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.949085 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.949109 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:12 crc kubenswrapper[4783]: I1002 10:54:12.949127 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:12Z","lastTransitionTime":"2025-10-02T10:54:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:13 crc kubenswrapper[4783]: I1002 10:54:13.051950 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:13 crc kubenswrapper[4783]: I1002 10:54:13.051993 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:13 crc kubenswrapper[4783]: I1002 10:54:13.052006 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:13 crc kubenswrapper[4783]: I1002 10:54:13.052022 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:13 crc kubenswrapper[4783]: I1002 10:54:13.052034 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:13Z","lastTransitionTime":"2025-10-02T10:54:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:13 crc kubenswrapper[4783]: I1002 10:54:13.154129 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:13 crc kubenswrapper[4783]: I1002 10:54:13.154178 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:13 crc kubenswrapper[4783]: I1002 10:54:13.154193 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:13 crc kubenswrapper[4783]: I1002 10:54:13.154217 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:13 crc kubenswrapper[4783]: I1002 10:54:13.154238 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:13Z","lastTransitionTime":"2025-10-02T10:54:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:13 crc kubenswrapper[4783]: I1002 10:54:13.257689 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:13 crc kubenswrapper[4783]: I1002 10:54:13.257777 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:13 crc kubenswrapper[4783]: I1002 10:54:13.257795 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:13 crc kubenswrapper[4783]: I1002 10:54:13.257820 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:13 crc kubenswrapper[4783]: I1002 10:54:13.257839 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:13Z","lastTransitionTime":"2025-10-02T10:54:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:13 crc kubenswrapper[4783]: I1002 10:54:13.361624 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:13 crc kubenswrapper[4783]: I1002 10:54:13.361722 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:13 crc kubenswrapper[4783]: I1002 10:54:13.361774 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:13 crc kubenswrapper[4783]: I1002 10:54:13.361800 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:13 crc kubenswrapper[4783]: I1002 10:54:13.361817 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:13Z","lastTransitionTime":"2025-10-02T10:54:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:13 crc kubenswrapper[4783]: I1002 10:54:13.464975 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:13 crc kubenswrapper[4783]: I1002 10:54:13.465053 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:13 crc kubenswrapper[4783]: I1002 10:54:13.465090 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:13 crc kubenswrapper[4783]: I1002 10:54:13.465126 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:13 crc kubenswrapper[4783]: I1002 10:54:13.465147 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:13Z","lastTransitionTime":"2025-10-02T10:54:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:13 crc kubenswrapper[4783]: I1002 10:54:13.568390 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:13 crc kubenswrapper[4783]: I1002 10:54:13.568563 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:13 crc kubenswrapper[4783]: I1002 10:54:13.568577 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:13 crc kubenswrapper[4783]: I1002 10:54:13.568597 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:13 crc kubenswrapper[4783]: I1002 10:54:13.568611 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:13Z","lastTransitionTime":"2025-10-02T10:54:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:13 crc kubenswrapper[4783]: I1002 10:54:13.671465 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:13 crc kubenswrapper[4783]: I1002 10:54:13.671509 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:13 crc kubenswrapper[4783]: I1002 10:54:13.671524 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:13 crc kubenswrapper[4783]: I1002 10:54:13.671547 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:13 crc kubenswrapper[4783]: I1002 10:54:13.671562 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:13Z","lastTransitionTime":"2025-10-02T10:54:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:13 crc kubenswrapper[4783]: I1002 10:54:13.775296 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:13 crc kubenswrapper[4783]: I1002 10:54:13.775355 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:13 crc kubenswrapper[4783]: I1002 10:54:13.775372 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:13 crc kubenswrapper[4783]: I1002 10:54:13.775395 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:13 crc kubenswrapper[4783]: I1002 10:54:13.775441 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:13Z","lastTransitionTime":"2025-10-02T10:54:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:13 crc kubenswrapper[4783]: I1002 10:54:13.880478 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:13 crc kubenswrapper[4783]: I1002 10:54:13.881093 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:13 crc kubenswrapper[4783]: I1002 10:54:13.881119 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:13 crc kubenswrapper[4783]: I1002 10:54:13.881160 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:13 crc kubenswrapper[4783]: I1002 10:54:13.881186 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:13Z","lastTransitionTime":"2025-10-02T10:54:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:13 crc kubenswrapper[4783]: I1002 10:54:13.984514 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:13 crc kubenswrapper[4783]: I1002 10:54:13.984601 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:13 crc kubenswrapper[4783]: I1002 10:54:13.984616 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:13 crc kubenswrapper[4783]: I1002 10:54:13.984676 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:13 crc kubenswrapper[4783]: I1002 10:54:13.984685 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:13Z","lastTransitionTime":"2025-10-02T10:54:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:14 crc kubenswrapper[4783]: I1002 10:54:14.087127 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:14 crc kubenswrapper[4783]: I1002 10:54:14.087178 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:14 crc kubenswrapper[4783]: I1002 10:54:14.087195 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:14 crc kubenswrapper[4783]: I1002 10:54:14.087217 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:14 crc kubenswrapper[4783]: I1002 10:54:14.087234 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:14Z","lastTransitionTime":"2025-10-02T10:54:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:14 crc kubenswrapper[4783]: I1002 10:54:14.191185 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:14 crc kubenswrapper[4783]: I1002 10:54:14.191255 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:14 crc kubenswrapper[4783]: I1002 10:54:14.191279 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:14 crc kubenswrapper[4783]: I1002 10:54:14.191310 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:14 crc kubenswrapper[4783]: I1002 10:54:14.191334 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:14Z","lastTransitionTime":"2025-10-02T10:54:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:14 crc kubenswrapper[4783]: I1002 10:54:14.294014 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:14 crc kubenswrapper[4783]: I1002 10:54:14.294076 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:14 crc kubenswrapper[4783]: I1002 10:54:14.294093 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:14 crc kubenswrapper[4783]: I1002 10:54:14.294115 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:14 crc kubenswrapper[4783]: I1002 10:54:14.294131 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:14Z","lastTransitionTime":"2025-10-02T10:54:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:14 crc kubenswrapper[4783]: I1002 10:54:14.397460 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:14 crc kubenswrapper[4783]: I1002 10:54:14.397506 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:14 crc kubenswrapper[4783]: I1002 10:54:14.397521 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:14 crc kubenswrapper[4783]: I1002 10:54:14.397544 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:14 crc kubenswrapper[4783]: I1002 10:54:14.397563 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:14Z","lastTransitionTime":"2025-10-02T10:54:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:14 crc kubenswrapper[4783]: I1002 10:54:14.501214 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:14 crc kubenswrapper[4783]: I1002 10:54:14.501586 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:14 crc kubenswrapper[4783]: I1002 10:54:14.501612 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:14 crc kubenswrapper[4783]: I1002 10:54:14.501644 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:14 crc kubenswrapper[4783]: I1002 10:54:14.501668 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:14Z","lastTransitionTime":"2025-10-02T10:54:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:14 crc kubenswrapper[4783]: I1002 10:54:14.544134 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 02 10:54:14 crc kubenswrapper[4783]: I1002 10:54:14.544215 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 02 10:54:14 crc kubenswrapper[4783]: E1002 10:54:14.544368 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 02 10:54:14 crc kubenswrapper[4783]: I1002 10:54:14.544442 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 02 10:54:14 crc kubenswrapper[4783]: E1002 10:54:14.544606 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 02 10:54:14 crc kubenswrapper[4783]: I1002 10:54:14.544153 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6qbg4"
Oct 02 10:54:14 crc kubenswrapper[4783]: E1002 10:54:14.544781 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 02 10:54:14 crc kubenswrapper[4783]: E1002 10:54:14.544976 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6qbg4" podUID="f05f5bf0-b0a7-453b-999b-8ef23ca6cc68"
Oct 02 10:54:14 crc kubenswrapper[4783]: I1002 10:54:14.605125 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:14 crc kubenswrapper[4783]: I1002 10:54:14.605222 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:14 crc kubenswrapper[4783]: I1002 10:54:14.605239 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:14 crc kubenswrapper[4783]: I1002 10:54:14.605263 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:14 crc kubenswrapper[4783]: I1002 10:54:14.605281 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:14Z","lastTransitionTime":"2025-10-02T10:54:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:14 crc kubenswrapper[4783]: I1002 10:54:14.708915 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:14 crc kubenswrapper[4783]: I1002 10:54:14.708980 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:14 crc kubenswrapper[4783]: I1002 10:54:14.708998 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:14 crc kubenswrapper[4783]: I1002 10:54:14.709022 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:14 crc kubenswrapper[4783]: I1002 10:54:14.709040 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:14Z","lastTransitionTime":"2025-10-02T10:54:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:14 crc kubenswrapper[4783]: I1002 10:54:14.812541 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:14 crc kubenswrapper[4783]: I1002 10:54:14.812572 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:14 crc kubenswrapper[4783]: I1002 10:54:14.812583 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:14 crc kubenswrapper[4783]: I1002 10:54:14.812601 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:14 crc kubenswrapper[4783]: I1002 10:54:14.812615 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:14Z","lastTransitionTime":"2025-10-02T10:54:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:14 crc kubenswrapper[4783]: I1002 10:54:14.915588 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:14 crc kubenswrapper[4783]: I1002 10:54:14.915641 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:14 crc kubenswrapper[4783]: I1002 10:54:14.915655 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:14 crc kubenswrapper[4783]: I1002 10:54:14.915815 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:14 crc kubenswrapper[4783]: I1002 10:54:14.915832 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:14Z","lastTransitionTime":"2025-10-02T10:54:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:15 crc kubenswrapper[4783]: I1002 10:54:15.018832 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:15 crc kubenswrapper[4783]: I1002 10:54:15.018889 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:15 crc kubenswrapper[4783]: I1002 10:54:15.018906 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:15 crc kubenswrapper[4783]: I1002 10:54:15.018932 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:15 crc kubenswrapper[4783]: I1002 10:54:15.018948 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:15Z","lastTransitionTime":"2025-10-02T10:54:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:15 crc kubenswrapper[4783]: I1002 10:54:15.121184 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:15 crc kubenswrapper[4783]: I1002 10:54:15.121253 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:15 crc kubenswrapper[4783]: I1002 10:54:15.121281 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:15 crc kubenswrapper[4783]: I1002 10:54:15.121314 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:15 crc kubenswrapper[4783]: I1002 10:54:15.121337 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:15Z","lastTransitionTime":"2025-10-02T10:54:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:15 crc kubenswrapper[4783]: I1002 10:54:15.224906 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:15 crc kubenswrapper[4783]: I1002 10:54:15.224968 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:15 crc kubenswrapper[4783]: I1002 10:54:15.224992 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:15 crc kubenswrapper[4783]: I1002 10:54:15.225021 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:15 crc kubenswrapper[4783]: I1002 10:54:15.225044 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:15Z","lastTransitionTime":"2025-10-02T10:54:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:15 crc kubenswrapper[4783]: I1002 10:54:15.327611 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:15 crc kubenswrapper[4783]: I1002 10:54:15.327898 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:15 crc kubenswrapper[4783]: I1002 10:54:15.327983 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:15 crc kubenswrapper[4783]: I1002 10:54:15.328092 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:15 crc kubenswrapper[4783]: I1002 10:54:15.328180 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:15Z","lastTransitionTime":"2025-10-02T10:54:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:15 crc kubenswrapper[4783]: I1002 10:54:15.430266 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:15 crc kubenswrapper[4783]: I1002 10:54:15.430315 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:15 crc kubenswrapper[4783]: I1002 10:54:15.430330 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:15 crc kubenswrapper[4783]: I1002 10:54:15.430350 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:15 crc kubenswrapper[4783]: I1002 10:54:15.430366 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:15Z","lastTransitionTime":"2025-10-02T10:54:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:15 crc kubenswrapper[4783]: I1002 10:54:15.532675 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:15 crc kubenswrapper[4783]: I1002 10:54:15.532733 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:15 crc kubenswrapper[4783]: I1002 10:54:15.532756 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:15 crc kubenswrapper[4783]: I1002 10:54:15.532783 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:15 crc kubenswrapper[4783]: I1002 10:54:15.532806 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:15Z","lastTransitionTime":"2025-10-02T10:54:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:15 crc kubenswrapper[4783]: I1002 10:54:15.635272 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:15 crc kubenswrapper[4783]: I1002 10:54:15.635319 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:15 crc kubenswrapper[4783]: I1002 10:54:15.635327 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:15 crc kubenswrapper[4783]: I1002 10:54:15.635341 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:15 crc kubenswrapper[4783]: I1002 10:54:15.635352 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:15Z","lastTransitionTime":"2025-10-02T10:54:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:15 crc kubenswrapper[4783]: I1002 10:54:15.738187 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:15 crc kubenswrapper[4783]: I1002 10:54:15.738228 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:15 crc kubenswrapper[4783]: I1002 10:54:15.738237 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:15 crc kubenswrapper[4783]: I1002 10:54:15.738251 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:15 crc kubenswrapper[4783]: I1002 10:54:15.738260 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:15Z","lastTransitionTime":"2025-10-02T10:54:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:15 crc kubenswrapper[4783]: I1002 10:54:15.840823 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:15 crc kubenswrapper[4783]: I1002 10:54:15.840894 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:15 crc kubenswrapper[4783]: I1002 10:54:15.840911 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:15 crc kubenswrapper[4783]: I1002 10:54:15.840934 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:15 crc kubenswrapper[4783]: I1002 10:54:15.840952 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:15Z","lastTransitionTime":"2025-10-02T10:54:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:15 crc kubenswrapper[4783]: I1002 10:54:15.943408 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:15 crc kubenswrapper[4783]: I1002 10:54:15.943518 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:15 crc kubenswrapper[4783]: I1002 10:54:15.943541 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:15 crc kubenswrapper[4783]: I1002 10:54:15.943571 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:15 crc kubenswrapper[4783]: I1002 10:54:15.943597 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:15Z","lastTransitionTime":"2025-10-02T10:54:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.046600 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.046644 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.046656 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.046672 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.046684 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:16Z","lastTransitionTime":"2025-10-02T10:54:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.134580 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.134654 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.134676 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.134704 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.134726 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:16Z","lastTransitionTime":"2025-10-02T10:54:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:16 crc kubenswrapper[4783]: E1002 10:54:16.154438 4783 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:16Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:16Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc187a47-fc71-4069-a609-1fd638044aa7\\\",\\\"systemUUID\\\":\\\"b9763463-2b4a-4924-bf4e-8df5af678b9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:16Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.158990 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.159027 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.159040 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.159057 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.159069 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:16Z","lastTransitionTime":"2025-10-02T10:54:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:16 crc kubenswrapper[4783]: E1002 10:54:16.177207 4783 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:16Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:16Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc187a47-fc71-4069-a609-1fd638044aa7\\\",\\\"systemUUID\\\":\\\"b9763463-2b4a-4924-bf4e-8df5af678b9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:16Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.181438 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.181483 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.181495 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.181514 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.181857 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:16Z","lastTransitionTime":"2025-10-02T10:54:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:16 crc kubenswrapper[4783]: E1002 10:54:16.197686 4783 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:16Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:16Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc187a47-fc71-4069-a609-1fd638044aa7\\\",\\\"systemUUID\\\":\\\"b9763463-2b4a-4924-bf4e-8df5af678b9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:16Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.203848 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.203887 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.203898 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.203914 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.203926 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:16Z","lastTransitionTime":"2025-10-02T10:54:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:16 crc kubenswrapper[4783]: E1002 10:54:16.217465 4783 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:16Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:16Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc187a47-fc71-4069-a609-1fd638044aa7\\\",\\\"systemUUID\\\":\\\"b9763463-2b4a-4924-bf4e-8df5af678b9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:16Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.221082 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.221287 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.221430 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.221575 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.221682 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:16Z","lastTransitionTime":"2025-10-02T10:54:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:16 crc kubenswrapper[4783]: E1002 10:54:16.235567 4783 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:16Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:16Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:16Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:16Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc187a47-fc71-4069-a609-1fd638044aa7\\\",\\\"systemUUID\\\":\\\"b9763463-2b4a-4924-bf4e-8df5af678b9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:16Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:16 crc kubenswrapper[4783]: E1002 10:54:16.235776 4783 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.237435 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
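Every status-patch retry above fails the same way: the kubelet cannot get past the node.network-node-identity.openshift.io webhook at https://127.0.0.1:9743 because its serving certificate expired on 2025-08-24T17:21:41Z. A minimal diagnostic sketch, assuming Python 3 with the third-party cryptography package on a host that can reach that endpoint, which fetches the certificate without verifying it (verification is exactly what fails in the log) and reports its expiry:

```python
import ssl
import sys
from datetime import datetime, timezone

from cryptography import x509  # third-party; assumed installed

# Target taken from the webhook URL in the log entries above.
HOST, PORT = "127.0.0.1", 9743

# get_server_certificate() skips chain verification, so it can still
# retrieve an expired serving certificate for inspection.
pem = ssl.get_server_certificate((HOST, PORT))
cert = x509.load_pem_x509_certificate(pem.encode())

not_after = cert.not_valid_after_utc  # cryptography >= 42; older: cert.not_valid_after
now = datetime.now(timezone.utc)
print(f"subject:  {cert.subject.rfc4514_string()}")
print(f"notAfter: {not_after.isoformat()}")
print(f"expired:  {now > not_after} (now {now.isoformat()})")
sys.exit(1 if now > not_after else 0)
```

Against the state recorded in this log, the sketch would be expected to print a notAfter of 2025-08-24T17:21:41Z and exit non-zero.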
event="NodeHasSufficientMemory" Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.237539 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.237613 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.237686 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.237749 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:16Z","lastTransitionTime":"2025-10-02T10:54:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.340816 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.340859 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.340868 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.340882 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.340892 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:16Z","lastTransitionTime":"2025-10-02T10:54:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.443956 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.444002 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.444015 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.444032 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.444045 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:16Z","lastTransitionTime":"2025-10-02T10:54:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.544543 4783 util.go:30] "No sandbox for pod can be found. 
Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.544543 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.544573 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.544877 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.544925 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6qbg4"
Oct 02 10:54:16 crc kubenswrapper[4783]: E1002 10:54:16.544954 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 02 10:54:16 crc kubenswrapper[4783]: E1002 10:54:16.544835 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 02 10:54:16 crc kubenswrapper[4783]: E1002 10:54:16.545156 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 02 10:54:16 crc kubenswrapper[4783]: E1002 10:54:16.545363 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6qbg4" podUID="f05f5bf0-b0a7-453b-999b-8ef23ca6cc68"
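Each pod-sync failure above reports the same root condition: no CNI configuration file in /etc/kubernetes/cni/net.d/. A minimal sketch, assuming Python 3 and read access to that directory (path taken verbatim from the log), that roughly mirrors the readiness check by listing candidate CNI config files:

```python
import json
import sys
from pathlib import Path

# Directory comes straight from the kubelet error message above.
CNI_CONF_DIR = Path("/etc/kubernetes/cni/net.d")

# Roughly the condition the runtime checks: does any CNI config exist?
candidates = sorted(
    p for p in CNI_CONF_DIR.glob("*")
    if p.suffix in {".conf", ".conflist", ".json"}
) if CNI_CONF_DIR.is_dir() else []

if not candidates:
    print(f"no CNI configuration file in {CNI_CONF_DIR}/ -- network plugin not ready")
    sys.exit(1)

for p in candidates:
    try:
        conf = json.loads(p.read_text())
        plugins = [pl.get("type") for pl in conf.get("plugins", [])] or conf.get("type")
        print(f"{p.name}: name={conf.get('name')!r} plugins={plugins}")
    except (OSError, json.JSONDecodeError) as exc:
        print(f"{p.name}: unreadable or invalid JSON ({exc})")
```

On the node captured here the directory is empty, so the sketch would exit 1 with the same "no CNI configuration file" message the kubelet keeps logging.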
pod="openshift-multus/network-metrics-daemon-6qbg4" podUID="f05f5bf0-b0a7-453b-999b-8ef23ca6cc68" Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.546642 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.546688 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.546704 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.546726 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.546743 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:16Z","lastTransitionTime":"2025-10-02T10:54:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.649811 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.649878 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.649898 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.649920 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.649937 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:16Z","lastTransitionTime":"2025-10-02T10:54:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.753399 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.753495 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.753514 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.753539 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.753556 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:16Z","lastTransitionTime":"2025-10-02T10:54:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.856568 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.856661 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.856682 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.856705 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.856722 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:16Z","lastTransitionTime":"2025-10-02T10:54:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.959721 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.959759 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.959774 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.959790 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:16 crc kubenswrapper[4783]: I1002 10:54:16.959802 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:16Z","lastTransitionTime":"2025-10-02T10:54:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.062105 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.062145 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.062159 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.062194 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.062214 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:17Z","lastTransitionTime":"2025-10-02T10:54:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.165337 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.165404 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.165435 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.165452 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.165463 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:17Z","lastTransitionTime":"2025-10-02T10:54:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.268745 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.268789 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.268799 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.268815 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.268826 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:17Z","lastTransitionTime":"2025-10-02T10:54:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
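The same five-entry block recurs roughly every 100 ms while the node stays NotReady. A small sketch, assuming Python 3, with sample timestamps copied from the consecutive "NodeHasSufficientMemory" entries above, that turns the klog time prefixes into deltas to make the cadence visible:

```python
from datetime import datetime

# Timestamps of successive NodeHasSufficientMemory entries from the log.
stamps = ["10:54:16.959721", "10:54:17.062105", "10:54:17.165337",
          "10:54:17.268745", "10:54:17.371372"]

# klog prefixes carry time-of-day only; parse hh:mm:ss.microseconds.
times = [datetime.strptime(s, "%H:%M:%S.%f") for s in stamps]
for earlier, later in zip(times, times[1:]):
    print(f"{later.time()} (+{(later - earlier).total_seconds():.3f}s)")
```

Each delta comes out near 0.103 s, consistent with a tight status-update loop rather than the normal node-status heartbeat interval.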
Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.371372 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.371475 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.371494 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.371519 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.371536 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:17Z","lastTransitionTime":"2025-10-02T10:54:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.474627 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.474701 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.474720 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.474746 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.474803 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:17Z","lastTransitionTime":"2025-10-02T10:54:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.545871 4783 scope.go:117] "RemoveContainer" containerID="28a2dfa47c9574c9dbe9d258a082c63f6430fc57d7bf234a4453042d2026460a" Oct 02 10:54:17 crc kubenswrapper[4783]: E1002 10:54:17.546188 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-qmd84_openshift-ovn-kubernetes(095cdcdf-1ea0-40da-871a-1223c6737377)\"" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.569912 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a3fab4917d2aaaba6208c2c5c4dd19b845e8d52e5e350566d8a78cfd581604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.578263 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.578320 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.578337 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:17 crc kubenswrapper[4783]: 
I1002 10:54:17.578362 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.578546 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:17Z","lastTransitionTime":"2025-10-02T10:54:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.593173 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jqvp2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"94681624-a0a9-443a-9b4d-715182399740\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b4ea5dd6b4b12458d54c54f155ff89b39e168c234f9e96f04375a0ed60ef09e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMo
unts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@
sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.16
8.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jqvp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.607861 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6qbg4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f05f5bf0-b0a7-453b-999b-8ef23ca6cc68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-scffz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-scffz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:24Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6qbg4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.625225 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02a1442d-9aca-4931-b631-da78a187f511\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://592d6115666eceba7b6853e7f053042c8e55df0085f67fe3193ed56d79e16cff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://09b174921a65fa7e1f4a51913ab90adbdc791ee1d14b4f498486612f0a66e79d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://09b174921a65fa7e1f4a51913ab90adbdc791ee1d14b4f498486612f0a66e79d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or 
is not yet valid: current time 2025-10-02T10:54:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.646723 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.680269 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"095cdcdf-1ea0-40da-871a-1223c6737377\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28a2dfa47c9574c9dbe9d258a082c63f6430fc57d7bf234a4453042d2026460a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28a2dfa47c9574c9dbe9d258a082c63f6430fc57d7bf234a4453042d2026460a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-02T10:54:04Z\\\",\\\"message\\\":\\\"] Retry object setup: *v1.Pod openshift-multus/multus-additional-cni-plugins-jqvp2\\\\nI1002 10:54:04.476900 6682 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/network-metrics-daemon-6qbg4\\\\nI1002 10:54:04.476770 6682 obj_retry.go:418] Waiting for all the *v1.Pod retry setup to complete in iterateRetryResources\\\\nI1002 10:54:04.476876 6682 ovn.go:134] Ensuring zone local for Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8 in node crc\\\\nI1002 10:54:04.476919 6682 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/network-metrics-daemon-6qbg4\\\\nI1002 10:54:04.476924 6682 obj_retry.go:386] Retry successful for *v1.Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8 after 0 failed attempt(s)\\\\nF1002 10:54:04.476929 6682 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:54:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-qmd84_openshift-ovn-kubernetes(095cdcdf-1ea0-40da-871a-1223c6737377)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qmd84\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.680440 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.681039 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.681063 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.681091 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.681109 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:17Z","lastTransitionTime":"2025-10-02T10:54:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.698197 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aef16d25-482a-4a0f-91e0-b67cdb92c4ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108796d4fc46b7aa41272bcdc4d1ce172e8cf057bde9f21171ff373264b986cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ppwc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50f25f7edd0387d10c6125f930f5e278511723be9a92210788ad55b6ec6e956d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ppwc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2q7k8\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.717508 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3288cc82-59a8-408e-8b0e-b5255882b4fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d07b963cb479fc49309f30652bcc2566fd49cd3c7fa53fc79aaf82a9a4b27eab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65d5af548f6048ec95699a1b7175ec8efea96441c335805d704b414eedac00d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-2j8rt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.729255 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-vxpvq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0781b010-f65f-4e49-9d78-48eda11666fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://feae8da7cb436d7753a9fce729106db7025bd28da1a5d4fb2e67ac07120daac8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbk2x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:12Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vxpvq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.764212 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9d7cf9d-8522-4a62-aaf3-1579864e50f3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b2e414bb5345d3aae7e32d912acf6d7ede59c1f47e1474caf4736663c733d2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8d3ec78a66bfaf15222ed50fe81f042cbf0b9aa5ecc8fb175b0efc8bc1bccbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a03f92fd7d8f5e7193682a5f1db78e947bea4135976b5db6d87842634f0c4d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edada94eb053469810a747a5d1bf8fe2c2da09
5e5c944dd2cd5ab7846faf5ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebdc8ed0cab8049a9f6220e6069291fd7a0e6c2b97e0442e3221549cbcc889f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.784013 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.784114 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.784157 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.784191 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.784215 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:17Z","lastTransitionTime":"2025-10-02T10:54:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.784997 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"2054147b-696c-471a-9602-db747325cae2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d954f0dc3572c180556ccde9a4c429359575e5418fb786fc5df6987f8c326075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e521d7072220c202109a67fe59ebebe0db8bd877d02d9805324922eb3bf45d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb33b77ecaf413bc6e4aeb0f4358b12cd0f3fa6e01602673a518f5651e4bac6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://414652a9b63f678601226313a42b29f5e146de06f854c1f63ed8d408ea9f3553\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-02T10:53:07Z\\\",\\\"message\\\":\\\"file observer\\\\nW1002 10:53:07.776642 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1002 10:53:07.776950 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1002 10:53:07.778492 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-420939118/tls.crt::/tmp/serving-cert-420939118/tls.key\\\\\\\"\\\\nI1002 10:53:07.918100 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1002 10:53:07.922528 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1002 10:53:07.922551 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1002 10:53:07.922578 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1002 10:53:07.922585 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1002 10:53:07.930960 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1002 10:53:07.930984 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1002 10:53:07.930999 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931026 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931031 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1002 10:53:07.931036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1002 10:53:07.931039 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1002 10:53:07.931042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1002 10:53:07.933737 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://85188b6d7b5901c957d4ea576d5baab74589a2300f15d6210915fd78f6171348\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.805775 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"acf018d3-a71b-450d-88ef-8f991d5d2219\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf7913b5261e05e8bbfb22a81635ff274a8a28d550d004a5349a9ed84ba5c8b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcc4f647d50346cff07c267ebe645576d032b01e443eaabe4b9aa73b91966e4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdd2ca18244e601967c7e10b1f8f1f02fcf33bfee712d78f7715909c95581b9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0e111ce373594ac0f4bf89b06fd2008eb6554e566575b88e4a4f6beaaa29d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.825066 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fd4a14eb803afc1b1efecd7c9b8560546554be16de62e8db79752bce61a47dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50292888ad1f8cb9b64dcecfa47015e88b7a560123e50d4a3bf1550b87cfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.842666 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wmn4g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6c8d5bc-163f-401f-bdc5-4625112dced9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b082bad7fea22dc168e5afb29973bf16e160c19b67918df47835b6ffa8ce7d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://66ec2f190b5004026dfc36e595ca483d08c1dc07796317d28f9dfd252e391fe7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-02T10:53:57Z\\\",\\\"message\\\":\\\"2025-10-02T10:53:12+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_99ebc822-96d6-4c29-af00-ccf9687e5669\\\\n2025-10-02T10:53:12+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_99ebc822-96d6-4c29-af00-ccf9687e5669 to /host/opt/cni/bin/\\\\n2025-10-02T10:53:12Z [verbose] multus-daemon started\\\\n2025-10-02T10:53:12Z [verbose] Readiness Indicator file check\\\\n2025-10-02T10:53:57Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6gs6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wmn4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.858182 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ks7tf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cbc7f6d-232e-484d-9afc-7111e428762c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160ecc09cabb40cb5867a449f2bb1707da2367f5b10cc4f53276b372b2641bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fj8jd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ks7tf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.875752 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"39fe3cc2-3136-44ab-8039-15da972eedad\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c3a7b7bb194e7cc66b1765fc3dc2f4d9c6aafa562cb9fcde19f862fda5e34282\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5dc6c37e90ca8574d11db2bcc1137a63af565614eccef178d05b863c6a839b07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://082040f0d8c15812040aa0d45d22f327c303d526e72d35524470348c34b4e74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1921ea86697ee9de43cd3e9b3b6673ad627b9997cd9f4738e8bd86428e5ef02b\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1921ea86697ee9de43cd3e9b3b6673ad627b9997cd9f4738e8bd86428e5ef02b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.886717 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.886790 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.886815 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.886846 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.886879 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:17Z","lastTransitionTime":"2025-10-02T10:54:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.890530 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.906577 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.919957 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dec06359fbdd507aff2ffef8bd1114892cd9e4732b0f21139fd60b5765bc074\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:17Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.989626 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.989679 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.989696 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.989721 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:17 crc kubenswrapper[4783]: I1002 10:54:17.989740 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:17Z","lastTransitionTime":"2025-10-02T10:54:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:18 crc kubenswrapper[4783]: I1002 10:54:18.092268 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:18 crc kubenswrapper[4783]: I1002 10:54:18.092333 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:18 crc kubenswrapper[4783]: I1002 10:54:18.092359 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:18 crc kubenswrapper[4783]: I1002 10:54:18.092408 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:18 crc kubenswrapper[4783]: I1002 10:54:18.092469 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:18Z","lastTransitionTime":"2025-10-02T10:54:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:18 crc kubenswrapper[4783]: I1002 10:54:18.194904 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:18 crc kubenswrapper[4783]: I1002 10:54:18.194968 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:18 crc kubenswrapper[4783]: I1002 10:54:18.194985 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:18 crc kubenswrapper[4783]: I1002 10:54:18.195008 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:18 crc kubenswrapper[4783]: I1002 10:54:18.195026 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:18Z","lastTransitionTime":"2025-10-02T10:54:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:18 crc kubenswrapper[4783]: I1002 10:54:18.298243 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:18 crc kubenswrapper[4783]: I1002 10:54:18.298303 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:18 crc kubenswrapper[4783]: I1002 10:54:18.298325 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:18 crc kubenswrapper[4783]: I1002 10:54:18.298351 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:18 crc kubenswrapper[4783]: I1002 10:54:18.298372 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:18Z","lastTransitionTime":"2025-10-02T10:54:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:18 crc kubenswrapper[4783]: I1002 10:54:18.402062 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:18 crc kubenswrapper[4783]: I1002 10:54:18.402141 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:18 crc kubenswrapper[4783]: I1002 10:54:18.402165 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:18 crc kubenswrapper[4783]: I1002 10:54:18.402194 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:18 crc kubenswrapper[4783]: I1002 10:54:18.402218 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:18Z","lastTransitionTime":"2025-10-02T10:54:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:18 crc kubenswrapper[4783]: I1002 10:54:18.505941 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:18 crc kubenswrapper[4783]: I1002 10:54:18.506014 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:18 crc kubenswrapper[4783]: I1002 10:54:18.506056 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:18 crc kubenswrapper[4783]: I1002 10:54:18.506082 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:18 crc kubenswrapper[4783]: I1002 10:54:18.506101 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:18Z","lastTransitionTime":"2025-10-02T10:54:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:18 crc kubenswrapper[4783]: I1002 10:54:18.544003 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6qbg4" Oct 02 10:54:18 crc kubenswrapper[4783]: I1002 10:54:18.544093 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 02 10:54:18 crc kubenswrapper[4783]: E1002 10:54:18.544215 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6qbg4" podUID="f05f5bf0-b0a7-453b-999b-8ef23ca6cc68" Oct 02 10:54:18 crc kubenswrapper[4783]: I1002 10:54:18.544244 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 02 10:54:18 crc kubenswrapper[4783]: E1002 10:54:18.544457 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 02 10:54:18 crc kubenswrapper[4783]: I1002 10:54:18.544582 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:54:18 crc kubenswrapper[4783]: E1002 10:54:18.544733 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 02 10:54:18 crc kubenswrapper[4783]: E1002 10:54:18.544862 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 02 10:54:18 crc kubenswrapper[4783]: I1002 10:54:18.608676 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:18 crc kubenswrapper[4783]: I1002 10:54:18.608724 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:18 crc kubenswrapper[4783]: I1002 10:54:18.608741 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:18 crc kubenswrapper[4783]: I1002 10:54:18.608762 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:18 crc kubenswrapper[4783]: I1002 10:54:18.608779 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:18Z","lastTransitionTime":"2025-10-02T10:54:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:18 crc kubenswrapper[4783]: I1002 10:54:18.711325 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:18 crc kubenswrapper[4783]: I1002 10:54:18.711379 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:18 crc kubenswrapper[4783]: I1002 10:54:18.711396 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:18 crc kubenswrapper[4783]: I1002 10:54:18.711452 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:18 crc kubenswrapper[4783]: I1002 10:54:18.711471 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:18Z","lastTransitionTime":"2025-10-02T10:54:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:18 crc kubenswrapper[4783]: I1002 10:54:18.814596 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:18 crc kubenswrapper[4783]: I1002 10:54:18.814660 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:18 crc kubenswrapper[4783]: I1002 10:54:18.814684 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:18 crc kubenswrapper[4783]: I1002 10:54:18.814713 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:18 crc kubenswrapper[4783]: I1002 10:54:18.814735 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:18Z","lastTransitionTime":"2025-10-02T10:54:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:18 crc kubenswrapper[4783]: I1002 10:54:18.916918 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:18 crc kubenswrapper[4783]: I1002 10:54:18.916970 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:18 crc kubenswrapper[4783]: I1002 10:54:18.916990 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:18 crc kubenswrapper[4783]: I1002 10:54:18.917039 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:18 crc kubenswrapper[4783]: I1002 10:54:18.917062 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:18Z","lastTransitionTime":"2025-10-02T10:54:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:19 crc kubenswrapper[4783]: I1002 10:54:19.019543 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:19 crc kubenswrapper[4783]: I1002 10:54:19.019580 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:19 crc kubenswrapper[4783]: I1002 10:54:19.019590 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:19 crc kubenswrapper[4783]: I1002 10:54:19.019607 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:19 crc kubenswrapper[4783]: I1002 10:54:19.019620 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:19Z","lastTransitionTime":"2025-10-02T10:54:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:19 crc kubenswrapper[4783]: I1002 10:54:19.122487 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:19 crc kubenswrapper[4783]: I1002 10:54:19.122582 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:19 crc kubenswrapper[4783]: I1002 10:54:19.122612 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:19 crc kubenswrapper[4783]: I1002 10:54:19.122642 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:19 crc kubenswrapper[4783]: I1002 10:54:19.122664 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:19Z","lastTransitionTime":"2025-10-02T10:54:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:19 crc kubenswrapper[4783]: I1002 10:54:19.225469 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:19 crc kubenswrapper[4783]: I1002 10:54:19.225508 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:19 crc kubenswrapper[4783]: I1002 10:54:19.225522 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:19 crc kubenswrapper[4783]: I1002 10:54:19.225537 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:19 crc kubenswrapper[4783]: I1002 10:54:19.225549 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:19Z","lastTransitionTime":"2025-10-02T10:54:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:19 crc kubenswrapper[4783]: I1002 10:54:19.328729 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:19 crc kubenswrapper[4783]: I1002 10:54:19.328810 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:19 crc kubenswrapper[4783]: I1002 10:54:19.328828 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:19 crc kubenswrapper[4783]: I1002 10:54:19.328856 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:19 crc kubenswrapper[4783]: I1002 10:54:19.328875 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:19Z","lastTransitionTime":"2025-10-02T10:54:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:19 crc kubenswrapper[4783]: I1002 10:54:19.431629 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:19 crc kubenswrapper[4783]: I1002 10:54:19.431725 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:19 crc kubenswrapper[4783]: I1002 10:54:19.431743 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:19 crc kubenswrapper[4783]: I1002 10:54:19.431765 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:19 crc kubenswrapper[4783]: I1002 10:54:19.431784 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:19Z","lastTransitionTime":"2025-10-02T10:54:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:19 crc kubenswrapper[4783]: I1002 10:54:19.534941 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:19 crc kubenswrapper[4783]: I1002 10:54:19.534968 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:19 crc kubenswrapper[4783]: I1002 10:54:19.534976 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:19 crc kubenswrapper[4783]: I1002 10:54:19.534987 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:19 crc kubenswrapper[4783]: I1002 10:54:19.534995 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:19Z","lastTransitionTime":"2025-10-02T10:54:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:19 crc kubenswrapper[4783]: I1002 10:54:19.637507 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:19 crc kubenswrapper[4783]: I1002 10:54:19.637544 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:19 crc kubenswrapper[4783]: I1002 10:54:19.637555 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:19 crc kubenswrapper[4783]: I1002 10:54:19.637572 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:19 crc kubenswrapper[4783]: I1002 10:54:19.637583 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:19Z","lastTransitionTime":"2025-10-02T10:54:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:19 crc kubenswrapper[4783]: I1002 10:54:19.739448 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:19 crc kubenswrapper[4783]: I1002 10:54:19.739484 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:19 crc kubenswrapper[4783]: I1002 10:54:19.739494 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:19 crc kubenswrapper[4783]: I1002 10:54:19.739509 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:19 crc kubenswrapper[4783]: I1002 10:54:19.739520 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:19Z","lastTransitionTime":"2025-10-02T10:54:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:19 crc kubenswrapper[4783]: I1002 10:54:19.841979 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:19 crc kubenswrapper[4783]: I1002 10:54:19.842044 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:19 crc kubenswrapper[4783]: I1002 10:54:19.842060 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:19 crc kubenswrapper[4783]: I1002 10:54:19.842083 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:19 crc kubenswrapper[4783]: I1002 10:54:19.842099 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:19Z","lastTransitionTime":"2025-10-02T10:54:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:19 crc kubenswrapper[4783]: I1002 10:54:19.945091 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:19 crc kubenswrapper[4783]: I1002 10:54:19.945145 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:19 crc kubenswrapper[4783]: I1002 10:54:19.945160 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:19 crc kubenswrapper[4783]: I1002 10:54:19.945186 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:19 crc kubenswrapper[4783]: I1002 10:54:19.945208 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:19Z","lastTransitionTime":"2025-10-02T10:54:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.047816 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.048064 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.048125 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.048154 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.048175 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:20Z","lastTransitionTime":"2025-10-02T10:54:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.150798 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.151152 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.151297 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.151479 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.151605 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:20Z","lastTransitionTime":"2025-10-02T10:54:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.255137 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.255551 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.255737 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.256152 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.256307 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:20Z","lastTransitionTime":"2025-10-02T10:54:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.360051 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.360141 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.360159 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.360182 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.360201 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:20Z","lastTransitionTime":"2025-10-02T10:54:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.463252 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.463330 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.463347 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.463371 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.463405 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:20Z","lastTransitionTime":"2025-10-02T10:54:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.544210 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.544286 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6qbg4" Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.544341 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 02 10:54:20 crc kubenswrapper[4783]: E1002 10:54:20.544545 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.544573 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 02 10:54:20 crc kubenswrapper[4783]: E1002 10:54:20.544677 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6qbg4" podUID="f05f5bf0-b0a7-453b-999b-8ef23ca6cc68" Oct 02 10:54:20 crc kubenswrapper[4783]: E1002 10:54:20.544866 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 02 10:54:20 crc kubenswrapper[4783]: E1002 10:54:20.544981 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.565903 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.565995 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.566013 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.566077 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.566094 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:20Z","lastTransitionTime":"2025-10-02T10:54:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.668613 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.668660 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.668673 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.668694 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.668709 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:20Z","lastTransitionTime":"2025-10-02T10:54:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.772188 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.772236 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.772253 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.772274 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.772290 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:20Z","lastTransitionTime":"2025-10-02T10:54:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.875316 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.875371 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.875386 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.875405 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.875448 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:20Z","lastTransitionTime":"2025-10-02T10:54:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.978157 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.978206 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.978218 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.978250 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:20 crc kubenswrapper[4783]: I1002 10:54:20.978259 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:20Z","lastTransitionTime":"2025-10-02T10:54:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:21 crc kubenswrapper[4783]: I1002 10:54:21.081179 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:21 crc kubenswrapper[4783]: I1002 10:54:21.081237 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:21 crc kubenswrapper[4783]: I1002 10:54:21.081254 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:21 crc kubenswrapper[4783]: I1002 10:54:21.081278 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:21 crc kubenswrapper[4783]: I1002 10:54:21.081296 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:21Z","lastTransitionTime":"2025-10-02T10:54:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:21 crc kubenswrapper[4783]: I1002 10:54:21.184787 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:21 crc kubenswrapper[4783]: I1002 10:54:21.184878 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:21 crc kubenswrapper[4783]: I1002 10:54:21.184897 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:21 crc kubenswrapper[4783]: I1002 10:54:21.184962 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:21 crc kubenswrapper[4783]: I1002 10:54:21.184980 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:21Z","lastTransitionTime":"2025-10-02T10:54:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:21 crc kubenswrapper[4783]: I1002 10:54:21.287725 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:21 crc kubenswrapper[4783]: I1002 10:54:21.287795 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:21 crc kubenswrapper[4783]: I1002 10:54:21.287818 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:21 crc kubenswrapper[4783]: I1002 10:54:21.287846 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:21 crc kubenswrapper[4783]: I1002 10:54:21.287867 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:21Z","lastTransitionTime":"2025-10-02T10:54:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:21 crc kubenswrapper[4783]: I1002 10:54:21.390892 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:21 crc kubenswrapper[4783]: I1002 10:54:21.390952 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:21 crc kubenswrapper[4783]: I1002 10:54:21.390964 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:21 crc kubenswrapper[4783]: I1002 10:54:21.390982 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:21 crc kubenswrapper[4783]: I1002 10:54:21.390994 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:21Z","lastTransitionTime":"2025-10-02T10:54:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:21 crc kubenswrapper[4783]: I1002 10:54:21.493816 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:21 crc kubenswrapper[4783]: I1002 10:54:21.493855 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:21 crc kubenswrapper[4783]: I1002 10:54:21.493867 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:21 crc kubenswrapper[4783]: I1002 10:54:21.493882 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:21 crc kubenswrapper[4783]: I1002 10:54:21.493892 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:21Z","lastTransitionTime":"2025-10-02T10:54:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:21 crc kubenswrapper[4783]: I1002 10:54:21.597459 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:21 crc kubenswrapper[4783]: I1002 10:54:21.597544 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:21 crc kubenswrapper[4783]: I1002 10:54:21.597567 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:21 crc kubenswrapper[4783]: I1002 10:54:21.597597 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:21 crc kubenswrapper[4783]: I1002 10:54:21.597619 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:21Z","lastTransitionTime":"2025-10-02T10:54:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:21 crc kubenswrapper[4783]: I1002 10:54:21.700098 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:21 crc kubenswrapper[4783]: I1002 10:54:21.700168 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:21 crc kubenswrapper[4783]: I1002 10:54:21.700184 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:21 crc kubenswrapper[4783]: I1002 10:54:21.700205 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:21 crc kubenswrapper[4783]: I1002 10:54:21.700220 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:21Z","lastTransitionTime":"2025-10-02T10:54:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:21 crc kubenswrapper[4783]: I1002 10:54:21.803696 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:21 crc kubenswrapper[4783]: I1002 10:54:21.803757 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:21 crc kubenswrapper[4783]: I1002 10:54:21.803827 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:21 crc kubenswrapper[4783]: I1002 10:54:21.803863 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:21 crc kubenswrapper[4783]: I1002 10:54:21.803886 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:21Z","lastTransitionTime":"2025-10-02T10:54:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:21 crc kubenswrapper[4783]: I1002 10:54:21.907017 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:21 crc kubenswrapper[4783]: I1002 10:54:21.907054 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:21 crc kubenswrapper[4783]: I1002 10:54:21.907062 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:21 crc kubenswrapper[4783]: I1002 10:54:21.907076 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:21 crc kubenswrapper[4783]: I1002 10:54:21.907084 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:21Z","lastTransitionTime":"2025-10-02T10:54:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.010092 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.010133 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.010146 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.010161 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.010173 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:22Z","lastTransitionTime":"2025-10-02T10:54:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.112960 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.113001 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.113009 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.113021 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.113030 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:22Z","lastTransitionTime":"2025-10-02T10:54:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.214645 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.214683 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.214694 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.214709 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.214724 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:22Z","lastTransitionTime":"2025-10-02T10:54:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.317608 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.317648 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.317658 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.317675 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.317690 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:22Z","lastTransitionTime":"2025-10-02T10:54:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.419888 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.419922 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.419931 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.419945 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.419954 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:22Z","lastTransitionTime":"2025-10-02T10:54:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.521869 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.521919 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.521956 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.521974 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.522007 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:22Z","lastTransitionTime":"2025-10-02T10:54:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.544179 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.544287 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:54:22 crc kubenswrapper[4783]: E1002 10:54:22.544375 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.544187 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.544644 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6qbg4" Oct 02 10:54:22 crc kubenswrapper[4783]: E1002 10:54:22.544641 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 02 10:54:22 crc kubenswrapper[4783]: E1002 10:54:22.544750 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 02 10:54:22 crc kubenswrapper[4783]: E1002 10:54:22.544824 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6qbg4" podUID="f05f5bf0-b0a7-453b-999b-8ef23ca6cc68" Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.623987 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.624023 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.624034 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.624048 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.624059 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:22Z","lastTransitionTime":"2025-10-02T10:54:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.727345 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.727403 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.727457 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.727485 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.727507 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:22Z","lastTransitionTime":"2025-10-02T10:54:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.830513 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.830560 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.830572 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.830589 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.830599 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:22Z","lastTransitionTime":"2025-10-02T10:54:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.934106 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.934158 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.934171 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.934187 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:22 crc kubenswrapper[4783]: I1002 10:54:22.934198 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:22Z","lastTransitionTime":"2025-10-02T10:54:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:23 crc kubenswrapper[4783]: I1002 10:54:23.036743 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:23 crc kubenswrapper[4783]: I1002 10:54:23.036815 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:23 crc kubenswrapper[4783]: I1002 10:54:23.036831 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:23 crc kubenswrapper[4783]: I1002 10:54:23.036848 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:23 crc kubenswrapper[4783]: I1002 10:54:23.036861 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:23Z","lastTransitionTime":"2025-10-02T10:54:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:23 crc kubenswrapper[4783]: I1002 10:54:23.139178 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:23 crc kubenswrapper[4783]: I1002 10:54:23.139209 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:23 crc kubenswrapper[4783]: I1002 10:54:23.139217 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:23 crc kubenswrapper[4783]: I1002 10:54:23.139232 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:23 crc kubenswrapper[4783]: I1002 10:54:23.139241 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:23Z","lastTransitionTime":"2025-10-02T10:54:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:23 crc kubenswrapper[4783]: I1002 10:54:23.241907 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:23 crc kubenswrapper[4783]: I1002 10:54:23.241950 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:23 crc kubenswrapper[4783]: I1002 10:54:23.241962 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:23 crc kubenswrapper[4783]: I1002 10:54:23.241981 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:23 crc kubenswrapper[4783]: I1002 10:54:23.241996 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:23Z","lastTransitionTime":"2025-10-02T10:54:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:23 crc kubenswrapper[4783]: I1002 10:54:23.344377 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:23 crc kubenswrapper[4783]: I1002 10:54:23.344441 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:23 crc kubenswrapper[4783]: I1002 10:54:23.344454 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:23 crc kubenswrapper[4783]: I1002 10:54:23.344472 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:23 crc kubenswrapper[4783]: I1002 10:54:23.344484 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:23Z","lastTransitionTime":"2025-10-02T10:54:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:23 crc kubenswrapper[4783]: I1002 10:54:23.446337 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:23 crc kubenswrapper[4783]: I1002 10:54:23.446386 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:23 crc kubenswrapper[4783]: I1002 10:54:23.446402 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:23 crc kubenswrapper[4783]: I1002 10:54:23.446456 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:23 crc kubenswrapper[4783]: I1002 10:54:23.446474 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:23Z","lastTransitionTime":"2025-10-02T10:54:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:23 crc kubenswrapper[4783]: I1002 10:54:23.548264 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:23 crc kubenswrapper[4783]: I1002 10:54:23.548306 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:23 crc kubenswrapper[4783]: I1002 10:54:23.548321 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:23 crc kubenswrapper[4783]: I1002 10:54:23.548340 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:23 crc kubenswrapper[4783]: I1002 10:54:23.548355 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:23Z","lastTransitionTime":"2025-10-02T10:54:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:23 crc kubenswrapper[4783]: I1002 10:54:23.650252 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:23 crc kubenswrapper[4783]: I1002 10:54:23.650304 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:23 crc kubenswrapper[4783]: I1002 10:54:23.650317 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:23 crc kubenswrapper[4783]: I1002 10:54:23.650337 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:23 crc kubenswrapper[4783]: I1002 10:54:23.650360 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:23Z","lastTransitionTime":"2025-10-02T10:54:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:23 crc kubenswrapper[4783]: I1002 10:54:23.752988 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:23 crc kubenswrapper[4783]: I1002 10:54:23.753054 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:23 crc kubenswrapper[4783]: I1002 10:54:23.753069 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:23 crc kubenswrapper[4783]: I1002 10:54:23.753090 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:23 crc kubenswrapper[4783]: I1002 10:54:23.753102 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:23Z","lastTransitionTime":"2025-10-02T10:54:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:23 crc kubenswrapper[4783]: I1002 10:54:23.855443 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:23 crc kubenswrapper[4783]: I1002 10:54:23.855481 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:23 crc kubenswrapper[4783]: I1002 10:54:23.855493 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:23 crc kubenswrapper[4783]: I1002 10:54:23.855509 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:23 crc kubenswrapper[4783]: I1002 10:54:23.855521 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:23Z","lastTransitionTime":"2025-10-02T10:54:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:23 crc kubenswrapper[4783]: I1002 10:54:23.958822 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:23 crc kubenswrapper[4783]: I1002 10:54:23.958864 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:23 crc kubenswrapper[4783]: I1002 10:54:23.958877 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:23 crc kubenswrapper[4783]: I1002 10:54:23.958893 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:23 crc kubenswrapper[4783]: I1002 10:54:23.958904 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:23Z","lastTransitionTime":"2025-10-02T10:54:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.061825 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.061882 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.061900 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.061930 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.061952 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:24Z","lastTransitionTime":"2025-10-02T10:54:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.164524 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.164589 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.164609 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.164632 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.164649 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:24Z","lastTransitionTime":"2025-10-02T10:54:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.267971 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.268022 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.268034 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.268052 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.268066 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:24Z","lastTransitionTime":"2025-10-02T10:54:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.370701 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.370763 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.370784 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.370813 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.370837 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:24Z","lastTransitionTime":"2025-10-02T10:54:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.474034 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.474098 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.474119 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.474145 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.474180 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:24Z","lastTransitionTime":"2025-10-02T10:54:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.544655 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.544749 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.544822 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6qbg4" Oct 02 10:54:24 crc kubenswrapper[4783]: E1002 10:54:24.544976 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.545041 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 02 10:54:24 crc kubenswrapper[4783]: E1002 10:54:24.545156 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 02 10:54:24 crc kubenswrapper[4783]: E1002 10:54:24.545252 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6qbg4" podUID="f05f5bf0-b0a7-453b-999b-8ef23ca6cc68" Oct 02 10:54:24 crc kubenswrapper[4783]: E1002 10:54:24.545888 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.576830 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.576868 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.576879 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.576894 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.576907 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:24Z","lastTransitionTime":"2025-10-02T10:54:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.679560 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.679623 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.679645 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.679669 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.679687 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:24Z","lastTransitionTime":"2025-10-02T10:54:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.782522 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.782568 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.782581 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.782597 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.782608 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:24Z","lastTransitionTime":"2025-10-02T10:54:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.886108 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.886167 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.886183 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.886205 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.886223 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:24Z","lastTransitionTime":"2025-10-02T10:54:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.989266 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.989391 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.989409 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.989466 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:24 crc kubenswrapper[4783]: I1002 10:54:24.989486 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:24Z","lastTransitionTime":"2025-10-02T10:54:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:25 crc kubenswrapper[4783]: I1002 10:54:25.092634 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:25 crc kubenswrapper[4783]: I1002 10:54:25.092688 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:25 crc kubenswrapper[4783]: I1002 10:54:25.092707 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:25 crc kubenswrapper[4783]: I1002 10:54:25.092732 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:25 crc kubenswrapper[4783]: I1002 10:54:25.092749 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:25Z","lastTransitionTime":"2025-10-02T10:54:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:25 crc kubenswrapper[4783]: I1002 10:54:25.195773 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:25 crc kubenswrapper[4783]: I1002 10:54:25.195822 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:25 crc kubenswrapper[4783]: I1002 10:54:25.195834 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:25 crc kubenswrapper[4783]: I1002 10:54:25.195851 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:25 crc kubenswrapper[4783]: I1002 10:54:25.195863 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:25Z","lastTransitionTime":"2025-10-02T10:54:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:25 crc kubenswrapper[4783]: I1002 10:54:25.298831 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:25 crc kubenswrapper[4783]: I1002 10:54:25.298875 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:25 crc kubenswrapper[4783]: I1002 10:54:25.298885 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:25 crc kubenswrapper[4783]: I1002 10:54:25.298901 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:25 crc kubenswrapper[4783]: I1002 10:54:25.298914 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:25Z","lastTransitionTime":"2025-10-02T10:54:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:25 crc kubenswrapper[4783]: I1002 10:54:25.401564 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:25 crc kubenswrapper[4783]: I1002 10:54:25.401593 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:25 crc kubenswrapper[4783]: I1002 10:54:25.401601 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:25 crc kubenswrapper[4783]: I1002 10:54:25.401613 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:25 crc kubenswrapper[4783]: I1002 10:54:25.401622 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:25Z","lastTransitionTime":"2025-10-02T10:54:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:25 crc kubenswrapper[4783]: I1002 10:54:25.503707 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:25 crc kubenswrapper[4783]: I1002 10:54:25.503756 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:25 crc kubenswrapper[4783]: I1002 10:54:25.503770 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:25 crc kubenswrapper[4783]: I1002 10:54:25.503793 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:25 crc kubenswrapper[4783]: I1002 10:54:25.503806 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:25Z","lastTransitionTime":"2025-10-02T10:54:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:25 crc kubenswrapper[4783]: I1002 10:54:25.606322 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:25 crc kubenswrapper[4783]: I1002 10:54:25.606389 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:25 crc kubenswrapper[4783]: I1002 10:54:25.606407 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:25 crc kubenswrapper[4783]: I1002 10:54:25.606463 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:25 crc kubenswrapper[4783]: I1002 10:54:25.606481 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:25Z","lastTransitionTime":"2025-10-02T10:54:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:25 crc kubenswrapper[4783]: I1002 10:54:25.709019 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:25 crc kubenswrapper[4783]: I1002 10:54:25.709075 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:25 crc kubenswrapper[4783]: I1002 10:54:25.709086 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:25 crc kubenswrapper[4783]: I1002 10:54:25.709101 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:25 crc kubenswrapper[4783]: I1002 10:54:25.709138 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:25Z","lastTransitionTime":"2025-10-02T10:54:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:25 crc kubenswrapper[4783]: I1002 10:54:25.812450 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:25 crc kubenswrapper[4783]: I1002 10:54:25.812877 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:25 crc kubenswrapper[4783]: I1002 10:54:25.813138 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:25 crc kubenswrapper[4783]: I1002 10:54:25.813374 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:25 crc kubenswrapper[4783]: I1002 10:54:25.813677 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:25Z","lastTransitionTime":"2025-10-02T10:54:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:25 crc kubenswrapper[4783]: I1002 10:54:25.916916 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:25 crc kubenswrapper[4783]: I1002 10:54:25.916962 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:25 crc kubenswrapper[4783]: I1002 10:54:25.916974 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:25 crc kubenswrapper[4783]: I1002 10:54:25.916993 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:25 crc kubenswrapper[4783]: I1002 10:54:25.917004 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:25Z","lastTransitionTime":"2025-10-02T10:54:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.020104 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.020181 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.020216 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.020246 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.020268 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:26Z","lastTransitionTime":"2025-10-02T10:54:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.122512 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.122552 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.122562 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.122577 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.122587 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:26Z","lastTransitionTime":"2025-10-02T10:54:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.225277 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.225333 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.225351 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.225380 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.225402 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:26Z","lastTransitionTime":"2025-10-02T10:54:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.328490 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.328539 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.328553 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.328575 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.328589 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:26Z","lastTransitionTime":"2025-10-02T10:54:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.431102 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.431166 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.431183 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.431207 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.431224 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:26Z","lastTransitionTime":"2025-10-02T10:54:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.496277 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.496335 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.496351 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.496403 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.496465 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:26Z","lastTransitionTime":"2025-10-02T10:54:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:26 crc kubenswrapper[4783]: E1002 10:54:26.512514 4783 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:26Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:26Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc187a47-fc71-4069-a609-1fd638044aa7\\\",\\\"systemUUID\\\":\\\"b9763463-2b4a-4924-bf4e-8df5af678b9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:26Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.516135 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.516205 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.516228 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.516257 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.516278 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:26Z","lastTransitionTime":"2025-10-02T10:54:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:26 crc kubenswrapper[4783]: E1002 10:54:26.536082 4783 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:26Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:26Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc187a47-fc71-4069-a609-1fd638044aa7\\\",\\\"systemUUID\\\":\\\"b9763463-2b4a-4924-bf4e-8df5af678b9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:26Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.541929 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.541983 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.542000 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.542020 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.542037 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:26Z","lastTransitionTime":"2025-10-02T10:54:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.543907 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.543934 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.543958 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.543939 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6qbg4" Oct 02 10:54:26 crc kubenswrapper[4783]: E1002 10:54:26.544078 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 02 10:54:26 crc kubenswrapper[4783]: E1002 10:54:26.544457 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 02 10:54:26 crc kubenswrapper[4783]: E1002 10:54:26.544572 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 02 10:54:26 crc kubenswrapper[4783]: E1002 10:54:26.544721 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-6qbg4" podUID="f05f5bf0-b0a7-453b-999b-8ef23ca6cc68" Oct 02 10:54:26 crc kubenswrapper[4783]: E1002 10:54:26.562208 4783 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:26Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:26Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc187a47-fc71-4069-a609-1fd638044aa7\\\",\\\"systemUUID\\\":\\\"b9763463-2b4a-4924-bf4e-8df5af678b9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:26Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.567552 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.567601 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.567618 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.567639 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.567656 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:26Z","lastTransitionTime":"2025-10-02T10:54:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:26 crc kubenswrapper[4783]: E1002 10:54:26.584106 4783 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:26Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:26Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc187a47-fc71-4069-a609-1fd638044aa7\\\",\\\"systemUUID\\\":\\\"b9763463-2b4a-4924-bf4e-8df5af678b9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:26Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.588273 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.588328 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.588346 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.588369 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.588455 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:26Z","lastTransitionTime":"2025-10-02T10:54:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:26 crc kubenswrapper[4783]: E1002 10:54:26.609076 4783 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"7800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"24148068Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"8\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"24608868Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:26Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:26Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-10-02T10:54:26Z\\\",\\\"lastTransitionTime\\\":\\\"2025-10-02T10:54:26Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"bc187a47-fc71-4069-a609-1fd638044aa7\\\",\\\"systemUUID\\\":\\\"b9763463-2b4a-4924-bf4e-8df5af678b9c\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:26Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:26 crc kubenswrapper[4783]: E1002 10:54:26.609306 4783 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.611501 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.611552 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.611599 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.611632 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.611651 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:26Z","lastTransitionTime":"2025-10-02T10:54:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.714323 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.714382 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.714399 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.714461 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.714480 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:26Z","lastTransitionTime":"2025-10-02T10:54:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.817966 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.818039 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.818063 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.818093 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.818117 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:26Z","lastTransitionTime":"2025-10-02T10:54:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.921153 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.921215 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.921251 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.921279 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:26 crc kubenswrapper[4783]: I1002 10:54:26.921300 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:26Z","lastTransitionTime":"2025-10-02T10:54:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.024219 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.024288 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.024312 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.024339 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.024360 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:27Z","lastTransitionTime":"2025-10-02T10:54:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.127743 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.127802 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.127827 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.127854 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.127877 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:27Z","lastTransitionTime":"2025-10-02T10:54:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.230064 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.230105 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.230116 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.230133 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.230146 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:27Z","lastTransitionTime":"2025-10-02T10:54:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.332648 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.332776 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.332801 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.332831 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.332853 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:27Z","lastTransitionTime":"2025-10-02T10:54:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.436229 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.436362 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.436384 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.436484 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.436523 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:27Z","lastTransitionTime":"2025-10-02T10:54:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.539260 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.539317 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.539334 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.539358 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.539388 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:27Z","lastTransitionTime":"2025-10-02T10:54:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.556361 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-6qbg4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f05f5bf0-b0a7-453b-999b-8ef23ca6cc68\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:24Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-scffz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-scffz\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:24Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-6qbg4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:27Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.569424 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"02a1442d-9aca-4931-b631-da78a187f511\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://592d6115666eceba7b6853e7f053042c8e55df0085f67fe3193ed56d79e16cff\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://09b174921a65fa7e1f4a51913ab90adbdc791ee1d14b4f498486612f0a66e79d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://09b174921a65fa7e1f4a51913ab90adbdc791ee1d14b4f498486612f0a66e79d\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:27Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.585130 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:27Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.603944 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"095cdcdf-1ea0-40da-871a-1223c6737377\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://28a2dfa47c9574c9dbe9d258a082c63f6430fc57
d7bf234a4453042d2026460a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://28a2dfa47c9574c9dbe9d258a082c63f6430fc57d7bf234a4453042d2026460a\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-02T10:54:04Z\\\",\\\"message\\\":\\\"] Retry object setup: *v1.Pod openshift-multus/multus-additional-cni-plugins-jqvp2\\\\nI1002 10:54:04.476900 6682 obj_retry.go:303] Retry object setup: *v1.Pod openshift-multus/network-metrics-daemon-6qbg4\\\\nI1002 10:54:04.476770 6682 obj_retry.go:418] Waiting for all the *v1.Pod retry setup to complete in iterateRetryResources\\\\nI1002 10:54:04.476876 6682 ovn.go:134] Ensuring zone local for Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8 in node crc\\\\nI1002 10:54:04.476919 6682 obj_retry.go:365] Adding new object: *v1.Pod openshift-multus/network-metrics-daemon-6qbg4\\\\nI1002 10:54:04.476924 6682 obj_retry.go:386] Retry successful for *v1.Pod openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8 after 0 failed attempt(s)\\\\nF1002 10:54:04.476929 6682 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:54:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-qmd84_openshift-ovn-kubernetes(095cdcdf-1ea0-40da-871a-1223c6737377)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-hhvtk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-qmd84\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:27Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.616221 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aef16d25-482a-4a0f-91e0-b67cdb92c4ed\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:22Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://108796d4fc46b7aa41272bcdc4d1ce172e8cf057bde9f21171ff373264b986cc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ppwc7
\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://50f25f7edd0387d10c6125f930f5e278511723be9a92210788ad55b6ec6e956d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:23Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-ppwc7\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:22Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-2q7k8\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:27Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.627740 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3288cc82-59a8-408e-8b0e-b5255882b4fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d07b963cb479fc49309f30652bcc2566fd49cd3c7fa53fc79aaf82a9a4b27eab\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://65d5af548f6048ec95699a1b7175ec8efea96441c335805d704b414eedac00d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rplhm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-2j8rt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:27Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.638567 4783 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-image-registry/node-ca-vxpvq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0781b010-f65f-4e49-9d78-48eda11666fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://feae8da7cb436d7753a9fce729106db7025bd28da1a5d4fb2e67ac07120daac8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wbk2x\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:12Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-vxpvq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:27Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.641563 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.641591 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.641601 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.641617 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.641627 4783 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:27Z","lastTransitionTime":"2025-10-02T10:54:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.663445 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b9d7cf9d-8522-4a62-aaf3-1579864e50f3\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8b2e414bb5345d3aae7e32d912acf6d7ede59c1f47e1474caf4736663c733d2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://d8d3ec78a66bfaf15222ed50fe81f042cbf0b9aa5ecc8fb175b0efc8bc1bccbb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6a03f92fd7d8f5e7193682a5f1db78e947bea4135976b5db6d87842634f0c4d2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/open
shift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4edada94eb053469810a747a5d1bf8fe2c2da095e5c944dd2cd5ab7846faf5ef\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ebdc8ed0cab8049a9f6220e6069291fd7a0e6c2b97e0442e3221549cbcc889f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:51Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://16d51ed74b98b52720a5f2689ec3da701736e244bdee0394b7ceeb1ce764ef6b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd
6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7556a11d41ad380d0733b57cd39cf5f3e973895a82b5e79dc9c4fcb326f1dae0\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://6db05e69eac9a6f3d0ee3bd5519de95e51e0f44a417ab861a4d8e53dfbc542f1\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:51Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:27Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.679353 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"2054147b-696c-471a-9602-db747325cae2\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d954f0dc3572c180556ccde9a4c429359575e5418fb786fc5df6987f8c326075\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://6e521d7072220c202109a67fe59ebebe0db8bd877d02d9805324922eb3bf45d6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fdb33b77ecaf413bc6e4aeb0f4358b12cd0f3fa6e01602673a518f5651e4bac6\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://414652a9b63f678601226313a42b29f5e146de06f854c1f63ed8d408ea9f3553\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ecd51ffb6055c56e36d3ba797ba660951ede6ee4cb3979b9ce7fb1703e7ee58a\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-10-02T10:53:07Z\\\",\\\"message\\\":\\\"file observer\\\\nW1002 10:53:07.776642 1 builder.go:272] unable to get owner reference (falling back to namespace): pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\nI1002 10:53:07.776950 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1002 10:53:07.778492 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-420939118/tls.crt::/tmp/serving-cert-420939118/tls.key\\\\\\\"\\\\nI1002 10:53:07.918100 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1002 10:53:07.922528 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1002 10:53:07.922551 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1002 10:53:07.922578 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1002 10:53:07.922585 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1002 10:53:07.930960 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nI1002 10:53:07.930984 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nW1002 10:53:07.930999 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931026 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1002 10:53:07.931031 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1002 10:53:07.931036 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1002 10:53:07.931039 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1002 10:53:07.931042 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nF1002 10:53:07.933737 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:52Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:26Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://85188b6d7b5901c957d4ea576d5baab74589a2300f15d6210915fd78f6171348\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://68e1c8936f80f0a59c581ba2ef00bb7225eff652a07c97ee5690a40c954f4008\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:27Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.693843 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"acf018d3-a71b-450d-88ef-8f991d5d2219\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:50Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://cf7913b5261e05e8bbfb22a81635ff274a8a28d550d004a5349a9ed84ba5c8b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fcc4f647d50346cff07c267ebe645576d032b01e443eaabe4b9aa73b91966e4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cbdd2ca18244e601967c7e10b1f8f1f02fcf33bfee712d78f7715909c95581b9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4f0e111ce373594ac0f4bf89b06fd2008eb6554e566575b88e4a4f6beaaa29d9\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:27Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.705952 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9fd4a14eb803afc1b1efecd7c9b8560546554be16de62e8db79752bce61a47dc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8e50292888ad1f8cb9b64dcecfa47015e88b7a560123e50d4a3bf1550b87cfaa\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482
919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:27Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.719804 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-wmn4g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6c8d5bc-163f-401f-bdc5-4625112dced9\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:59Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b082bad7fea22dc168e5afb29973bf16e160c19b67918df47835b6ffa8ce7d1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://66ec2f190b5004026dfc36e595ca483d08c1dc07796317d28f9dfd252e391fe7\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-10-02T10:53:57Z\\\",\\\"message\\\":\\\"2025-10-02T10:53:12+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_99ebc822-96d6-4c29-af00-ccf9687e5669\\\\n2025-10-02T10:53:12+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_99ebc822-96d6-4c29-af00-ccf9687e5669 to /host/opt/cni/bin/\\\\n2025-10-02T10:53:12Z [verbose] multus-daemon started\\\\n2025-10-02T10:53:12Z [verbose] Readiness Indicator file check\\\\n2025-10-02T10:53:57Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:58Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-l6gs6\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-wmn4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:27Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.729151 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-ks7tf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0cbc7f6d-232e-484d-9afc-7111e428762c\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://160ecc09cabb40cb5867a449f2bb1707da2367f5b10cc4f53276b372b2641bc9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-fj8jd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-ks7tf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:27Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.744547 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.744598 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.744609 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.744629 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.744644 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:27Z","lastTransitionTime":"2025-10-02T10:54:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.745199 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"39fe3cc2-3136-44ab-8039-15da972eedad\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:52:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c3a7b7bb194e7cc66b1765fc3dc2f4d9c6aafa562cb9fcde19f862fda5e34282\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5dc6c37e90ca8574d11db2bcc1137a63af565614eccef178d05b863c6a839b07\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:49Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://082040f0d8c15812040aa0d45d22f327c303d526e72d35524470348c34b4e74a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:52:50Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\
\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://1921ea86697ee9de43cd3e9b3b6673ad627b9997cd9f4738e8bd86428e5ef02b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1921ea86697ee9de43cd3e9b3b6673ad627b9997cd9f4738e8bd86428e5ef02b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:52:49Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:52:48Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:52:47Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:27Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.759013 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:27Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.773076 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:08Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:27Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.783744 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8dec06359fbdd507aff2ffef8bd1114892cd9e4732b0f21139fd60b5765bc074\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:27Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.798690 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:09Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a4a3fab4917d2aaaba6208c2c5c4dd19b845e8d52e5e350566d8a78cfd581604\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:27Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.815042 4783 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-jqvp2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"94681624-a0a9-443a-9b4d-715182399740\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-10-02T10:53:17Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7b4ea5dd6b4b12458d54c54f155ff89b39e168c234f9e96f04375a0ed60ef09e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-10-02T10:53:17Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5dfb7743aba405a8d50ebd4af6f29023879d76cf2019d7187a62d4f906b00cf5\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:11Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://da0c8d1799c69e744ac02d04bf2e7460a3fdf2ee3e9feec487d5df44f68abf22\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:12Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5df62be0e9b685f376fb5c759dfd93eb98b09e95e66d8179ad0dc708b930ab63\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:13Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2806bbcd174a61c66031bb570e8ec310a9e1c9dfe801b083fa9bc16b04af5004\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:14Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:13Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://805835883b20aab6d9424208c66984ca8c94b364850088fdeef4121271cfabc4\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:15Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:14Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ba0c3ac73a45c1d64579ae30a3d0e795bcc4e9fb461c9bc58cedfe51e8fd270e\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-10-02T10:53:16Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-10-02T10:53:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-h8zq9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-10-02T10:53:10Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-jqvp2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-10-02T10:54:27Z is after 2025-08-24T17:21:41Z" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.847320 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.847349 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:27 crc 
kubenswrapper[4783]: I1002 10:54:27.847358 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.847371 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.847380 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:27Z","lastTransitionTime":"2025-10-02T10:54:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.949407 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.949506 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.949529 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.949557 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:27 crc kubenswrapper[4783]: I1002 10:54:27.949580 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:27Z","lastTransitionTime":"2025-10-02T10:54:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.052084 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.052150 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.052181 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.052209 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.052231 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:28Z","lastTransitionTime":"2025-10-02T10:54:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.155141 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.155217 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.155240 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.155272 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.155295 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:28Z","lastTransitionTime":"2025-10-02T10:54:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.258780 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.258826 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.258844 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.258869 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.258888 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:28Z","lastTransitionTime":"2025-10-02T10:54:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.361567 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.361924 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.362084 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.362245 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.362271 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:28Z","lastTransitionTime":"2025-10-02T10:54:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.434608 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f05f5bf0-b0a7-453b-999b-8ef23ca6cc68-metrics-certs\") pod \"network-metrics-daemon-6qbg4\" (UID: \"f05f5bf0-b0a7-453b-999b-8ef23ca6cc68\") " pod="openshift-multus/network-metrics-daemon-6qbg4" Oct 02 10:54:28 crc kubenswrapper[4783]: E1002 10:54:28.434831 4783 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Oct 02 10:54:28 crc kubenswrapper[4783]: E1002 10:54:28.434914 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/f05f5bf0-b0a7-453b-999b-8ef23ca6cc68-metrics-certs podName:f05f5bf0-b0a7-453b-999b-8ef23ca6cc68 nodeName:}" failed. No retries permitted until 2025-10-02 10:55:32.434886775 +0000 UTC m=+165.751081076 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/f05f5bf0-b0a7-453b-999b-8ef23ca6cc68-metrics-certs") pod "network-metrics-daemon-6qbg4" (UID: "f05f5bf0-b0a7-453b-999b-8ef23ca6cc68") : object "openshift-multus"/"metrics-daemon-secret" not registered Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.464645 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.464705 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.464722 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.464746 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.464763 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:28Z","lastTransitionTime":"2025-10-02T10:54:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.543895 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.543909 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.543978 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6qbg4" Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.544051 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:54:28 crc kubenswrapper[4783]: E1002 10:54:28.544208 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 02 10:54:28 crc kubenswrapper[4783]: E1002 10:54:28.544639 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 02 10:54:28 crc kubenswrapper[4783]: E1002 10:54:28.544823 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6qbg4" podUID="f05f5bf0-b0a7-453b-999b-8ef23ca6cc68" Oct 02 10:54:28 crc kubenswrapper[4783]: E1002 10:54:28.544983 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.567967 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.568030 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.568054 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.568079 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.568097 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:28Z","lastTransitionTime":"2025-10-02T10:54:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.671096 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.671444 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.671549 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.671626 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.671692 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:28Z","lastTransitionTime":"2025-10-02T10:54:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.773577 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.773614 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.773623 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.773636 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.773645 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:28Z","lastTransitionTime":"2025-10-02T10:54:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.875869 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.875923 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.875940 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.875963 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.875980 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:28Z","lastTransitionTime":"2025-10-02T10:54:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.978225 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.978274 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.978286 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.978302 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:28 crc kubenswrapper[4783]: I1002 10:54:28.978315 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:28Z","lastTransitionTime":"2025-10-02T10:54:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:29 crc kubenswrapper[4783]: I1002 10:54:29.080520 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:29 crc kubenswrapper[4783]: I1002 10:54:29.080630 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:29 crc kubenswrapper[4783]: I1002 10:54:29.080649 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:29 crc kubenswrapper[4783]: I1002 10:54:29.080700 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:29 crc kubenswrapper[4783]: I1002 10:54:29.080719 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:29Z","lastTransitionTime":"2025-10-02T10:54:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:29 crc kubenswrapper[4783]: I1002 10:54:29.183954 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:29 crc kubenswrapper[4783]: I1002 10:54:29.183998 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:29 crc kubenswrapper[4783]: I1002 10:54:29.184010 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:29 crc kubenswrapper[4783]: I1002 10:54:29.184026 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:29 crc kubenswrapper[4783]: I1002 10:54:29.184038 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:29Z","lastTransitionTime":"2025-10-02T10:54:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:29 crc kubenswrapper[4783]: I1002 10:54:29.286920 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:29 crc kubenswrapper[4783]: I1002 10:54:29.286981 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:29 crc kubenswrapper[4783]: I1002 10:54:29.287013 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:29 crc kubenswrapper[4783]: I1002 10:54:29.287038 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:29 crc kubenswrapper[4783]: I1002 10:54:29.287056 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:29Z","lastTransitionTime":"2025-10-02T10:54:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:29 crc kubenswrapper[4783]: I1002 10:54:29.390647 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:29 crc kubenswrapper[4783]: I1002 10:54:29.390697 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:29 crc kubenswrapper[4783]: I1002 10:54:29.390715 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:29 crc kubenswrapper[4783]: I1002 10:54:29.390739 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:29 crc kubenswrapper[4783]: I1002 10:54:29.390756 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:29Z","lastTransitionTime":"2025-10-02T10:54:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:29 crc kubenswrapper[4783]: I1002 10:54:29.493744 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:29 crc kubenswrapper[4783]: I1002 10:54:29.493808 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:29 crc kubenswrapper[4783]: I1002 10:54:29.493829 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:29 crc kubenswrapper[4783]: I1002 10:54:29.493857 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:29 crc kubenswrapper[4783]: I1002 10:54:29.493881 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:29Z","lastTransitionTime":"2025-10-02T10:54:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:29 crc kubenswrapper[4783]: I1002 10:54:29.596967 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:29 crc kubenswrapper[4783]: I1002 10:54:29.597029 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:29 crc kubenswrapper[4783]: I1002 10:54:29.597047 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:29 crc kubenswrapper[4783]: I1002 10:54:29.597071 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:29 crc kubenswrapper[4783]: I1002 10:54:29.597089 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:29Z","lastTransitionTime":"2025-10-02T10:54:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:29 crc kubenswrapper[4783]: I1002 10:54:29.700093 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:29 crc kubenswrapper[4783]: I1002 10:54:29.700146 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:29 crc kubenswrapper[4783]: I1002 10:54:29.700163 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:29 crc kubenswrapper[4783]: I1002 10:54:29.700182 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:29 crc kubenswrapper[4783]: I1002 10:54:29.700197 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:29Z","lastTransitionTime":"2025-10-02T10:54:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:29 crc kubenswrapper[4783]: I1002 10:54:29.803647 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:29 crc kubenswrapper[4783]: I1002 10:54:29.803716 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:29 crc kubenswrapper[4783]: I1002 10:54:29.803741 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:29 crc kubenswrapper[4783]: I1002 10:54:29.803769 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:29 crc kubenswrapper[4783]: I1002 10:54:29.803792 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:29Z","lastTransitionTime":"2025-10-02T10:54:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:29 crc kubenswrapper[4783]: I1002 10:54:29.906740 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:29 crc kubenswrapper[4783]: I1002 10:54:29.906792 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:29 crc kubenswrapper[4783]: I1002 10:54:29.906809 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:29 crc kubenswrapper[4783]: I1002 10:54:29.906836 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:29 crc kubenswrapper[4783]: I1002 10:54:29.906858 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:29Z","lastTransitionTime":"2025-10-02T10:54:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:30 crc kubenswrapper[4783]: I1002 10:54:30.009281 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:30 crc kubenswrapper[4783]: I1002 10:54:30.009346 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:30 crc kubenswrapper[4783]: I1002 10:54:30.009362 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:30 crc kubenswrapper[4783]: I1002 10:54:30.009387 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:30 crc kubenswrapper[4783]: I1002 10:54:30.009406 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:30Z","lastTransitionTime":"2025-10-02T10:54:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:30 crc kubenswrapper[4783]: I1002 10:54:30.112407 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:30 crc kubenswrapper[4783]: I1002 10:54:30.112492 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:30 crc kubenswrapper[4783]: I1002 10:54:30.112508 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:30 crc kubenswrapper[4783]: I1002 10:54:30.112530 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:30 crc kubenswrapper[4783]: I1002 10:54:30.112547 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:30Z","lastTransitionTime":"2025-10-02T10:54:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Oct 02 10:54:30 crc kubenswrapper[4783]: I1002 10:54:30.214935 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:30 crc kubenswrapper[4783]: I1002 10:54:30.214996 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:30 crc kubenswrapper[4783]: I1002 10:54:30.215015 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:30 crc kubenswrapper[4783]: I1002 10:54:30.215048 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:30 crc kubenswrapper[4783]: I1002 10:54:30.215066 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:30Z","lastTransitionTime":"2025-10-02T10:54:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
[... the same five-entry node-status cycle (NodeHasSufficientMemory, NodeHasNoDiskPressure, NodeHasSufficientPID, NodeNotReady, "Node became not ready" with the identical KubeletNotReady/CNI message) repeats at 10:54:30.317, 10:54:30.420 and 10:54:30.522; only the timestamps differ ...]
Oct 02 10:54:30 crc kubenswrapper[4783]: I1002 10:54:30.544563 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 02 10:54:30 crc kubenswrapper[4783]: I1002 10:54:30.544563 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6qbg4"
Oct 02 10:54:30 crc kubenswrapper[4783]: I1002 10:54:30.544744 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 02 10:54:30 crc kubenswrapper[4783]: I1002 10:54:30.544824 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 02 10:54:30 crc kubenswrapper[4783]: E1002 10:54:30.544976 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 02 10:54:30 crc kubenswrapper[4783]: E1002 10:54:30.545139 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6qbg4" podUID="f05f5bf0-b0a7-453b-999b-8ef23ca6cc68"
Oct 02 10:54:30 crc kubenswrapper[4783]: I1002 10:54:30.545261 4783 scope.go:117] "RemoveContainer" containerID="28a2dfa47c9574c9dbe9d258a082c63f6430fc57d7bf234a4453042d2026460a"
Oct 02 10:54:30 crc kubenswrapper[4783]: E1002 10:54:30.545350 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 02 10:54:30 crc kubenswrapper[4783]: E1002 10:54:30.545266 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 02 10:54:30 crc kubenswrapper[4783]: E1002 10:54:30.545460 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-qmd84_openshift-ovn-kubernetes(095cdcdf-1ea0-40da-871a-1223c6737377)\"" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" podUID="095cdcdf-1ea0-40da-871a-1223c6737377"
[... 19 identical node-status cycles repeat every ~100ms from 10:54:30.625 through 10:54:32.481; only the timestamps differ ...]
Oct 02 10:54:32 crc kubenswrapper[4783]: I1002 10:54:32.544178 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6qbg4"
Oct 02 10:54:32 crc kubenswrapper[4783]: I1002 10:54:32.544299 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 02 10:54:32 crc kubenswrapper[4783]: I1002 10:54:32.544317 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 02 10:54:32 crc kubenswrapper[4783]: I1002 10:54:32.544198 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 02 10:54:32 crc kubenswrapper[4783]: E1002 10:54:32.544481 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6qbg4" podUID="f05f5bf0-b0a7-453b-999b-8ef23ca6cc68"
Oct 02 10:54:32 crc kubenswrapper[4783]: E1002 10:54:32.544586 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 02 10:54:32 crc kubenswrapper[4783]: E1002 10:54:32.544682 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 02 10:54:32 crc kubenswrapper[4783]: E1002 10:54:32.544808 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
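The status cycles recur on a roughly 100ms tick, with the pod-sync retries landing every ~2s. A hedged sketch for eyeballing that cadence when the log is piped on stdin (the 15-character prefix length assumes the syslog-style "MMM DD HH:MM:SS" stamps used in this file):

package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

// Counts "Node became not ready" entries per one-second timestamp
// prefix. Illustrative only; not part of kubelet or the CI tooling.
func main() {
	counts := map[string]int{}
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 1024*1024), 1024*1024) // entries here are long
	for sc.Scan() {
		line := sc.Text()
		if strings.Contains(line, `"Node became not ready"`) && len(line) >= 15 {
			counts[line[:15]]++ // e.g. "Oct 02 10:54:32"
		}
	}
	for ts, n := range counts {
		fmt.Printf("%s  %d updates\n", ts, n)
	}
}

Against the entries above this reports roughly ten updates per second for each wall-clock second between 10:54:30 and 10:54:35.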
[... 19 more identical node-status cycles from 10:54:32.584 through 10:54:34.441; only the timestamps differ ...]
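Every failure above traces back to the same missing file: nothing matching a CNI config exists yet under /etc/kubernetes/cni/net.d/ (the path is quoted verbatim in the messages). A hedged debugging sketch, not part of kubelet, that polls that directory until a config appears; the *.conf* glob and 2s interval are arbitrary choices for illustration:

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"time"
)

// Waits for a CNI config (*.conf or *.conflist) to show up in the
// directory named by the kubelet errors above.
func main() {
	const dir = "/etc/kubernetes/cni/net.d"
	for {
		matches, _ := filepath.Glob(filepath.Join(dir, "*.conf*"))
		if len(matches) > 0 {
			fmt.Println("CNI config present:", matches)
			return
		}
		if _, err := os.Stat(dir); err != nil {
			fmt.Println("cannot stat", dir, "-", err)
		}
		time.Sleep(2 * time.Second)
	}
}

Once the network operator writes its config there, the NetworkReady=false condition clears and the cycles below stop.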
Oct 02 10:54:34 crc kubenswrapper[4783]: I1002 10:54:34.543885 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 02 10:54:34 crc kubenswrapper[4783]: I1002 10:54:34.544016 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6qbg4"
Oct 02 10:54:34 crc kubenswrapper[4783]: I1002 10:54:34.544058 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 02 10:54:34 crc kubenswrapper[4783]: E1002 10:54:34.544226 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 02 10:54:34 crc kubenswrapper[4783]: I1002 10:54:34.544312 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 02 10:54:34 crc kubenswrapper[4783]: E1002 10:54:34.544566 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6qbg4" podUID="f05f5bf0-b0a7-453b-999b-8ef23ca6cc68"
Oct 02 10:54:34 crc kubenswrapper[4783]: E1002 10:54:34.544638 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 02 10:54:34 crc kubenswrapper[4783]: E1002 10:54:34.545022 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
[... 10 more identical node-status cycles at 10:54:34.546, .649, .752, .856, .959 and 10:54:35.063, .166, .269, .371, .474 ...]
Oct 02 10:54:35 crc kubenswrapper[4783]: I1002 10:54:35.577587 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:35 crc kubenswrapper[4783]: I1002 10:54:35.577660 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:35 crc kubenswrapper[4783]: I1002 10:54:35.577682 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:35 crc kubenswrapper[4783]: I1002 10:54:35.577713 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:35 crc kubenswrapper[4783]: I1002 10:54:35.577736 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:35Z","lastTransitionTime":"2025-10-02T10:54:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Oct 02 10:54:35 crc kubenswrapper[4783]: I1002 10:54:35.680374 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:35 crc kubenswrapper[4783]: I1002 10:54:35.680480 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:35 crc kubenswrapper[4783]: I1002 10:54:35.680498 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:35 crc kubenswrapper[4783]: I1002 10:54:35.680522 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:35 crc kubenswrapper[4783]: I1002 10:54:35.680539 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:35Z","lastTransitionTime":"2025-10-02T10:54:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:35 crc kubenswrapper[4783]: I1002 10:54:35.783932 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:35 crc kubenswrapper[4783]: I1002 10:54:35.783997 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:35 crc kubenswrapper[4783]: I1002 10:54:35.784015 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:35 crc kubenswrapper[4783]: I1002 10:54:35.784038 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:35 crc kubenswrapper[4783]: I1002 10:54:35.784055 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:35Z","lastTransitionTime":"2025-10-02T10:54:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:35 crc kubenswrapper[4783]: I1002 10:54:35.887666 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:35 crc kubenswrapper[4783]: I1002 10:54:35.887732 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:35 crc kubenswrapper[4783]: I1002 10:54:35.887756 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:35 crc kubenswrapper[4783]: I1002 10:54:35.887785 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:35 crc kubenswrapper[4783]: I1002 10:54:35.887806 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:35Z","lastTransitionTime":"2025-10-02T10:54:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:35 crc kubenswrapper[4783]: I1002 10:54:35.991624 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:35 crc kubenswrapper[4783]: I1002 10:54:35.991687 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:35 crc kubenswrapper[4783]: I1002 10:54:35.991710 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:35 crc kubenswrapper[4783]: I1002 10:54:35.991739 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:35 crc kubenswrapper[4783]: I1002 10:54:35.991763 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:35Z","lastTransitionTime":"2025-10-02T10:54:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.094900 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.094962 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.094979 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.095021 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.095040 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:36Z","lastTransitionTime":"2025-10-02T10:54:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.197660 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.197722 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.197745 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.197772 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.197791 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:36Z","lastTransitionTime":"2025-10-02T10:54:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.301360 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.301465 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.301503 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.301537 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.301557 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:36Z","lastTransitionTime":"2025-10-02T10:54:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.404408 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.404599 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.404642 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.404673 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.404700 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:36Z","lastTransitionTime":"2025-10-02T10:54:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.507203 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.507241 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.507249 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.507262 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.507271 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:36Z","lastTransitionTime":"2025-10-02T10:54:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.544663 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.544672 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.544839 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 02 10:54:36 crc kubenswrapper[4783]: E1002 10:54:36.545196 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.544856 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6qbg4"
Oct 02 10:54:36 crc kubenswrapper[4783]: E1002 10:54:36.545484 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 02 10:54:36 crc kubenswrapper[4783]: E1002 10:54:36.545757 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 02 10:54:36 crc kubenswrapper[4783]: E1002 10:54:36.545809 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6qbg4" podUID="f05f5bf0-b0a7-453b-999b-8ef23ca6cc68"
pod="openshift-multus/network-metrics-daemon-6qbg4" podUID="f05f5bf0-b0a7-453b-999b-8ef23ca6cc68" Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.609984 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.610029 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.610038 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.610054 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.610072 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:36Z","lastTransitionTime":"2025-10-02T10:54:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.713479 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.713538 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.713555 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.713579 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.713597 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:36Z","lastTransitionTime":"2025-10-02T10:54:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.816829 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.816886 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.816908 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.816937 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.816960 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:36Z","lastTransitionTime":"2025-10-02T10:54:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.920542 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.920602 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.920619 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.920642 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.920659 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:36Z","lastTransitionTime":"2025-10-02T10:54:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.922283 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.922369 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.922387 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.922435 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.922454 4783 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-10-02T10:54:36Z","lastTransitionTime":"2025-10-02T10:54:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.992435 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-x6m4t"] Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.992897 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-x6m4t" Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.995282 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.995766 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.996802 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Oct 02 10:54:36 crc kubenswrapper[4783]: I1002 10:54:36.999226 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Oct 02 10:54:37 crc kubenswrapper[4783]: I1002 10:54:37.031052 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/14efb254-184d-4033-b4e6-627bb2e221d4-service-ca\") pod \"cluster-version-operator-5c965bbfc6-x6m4t\" (UID: \"14efb254-184d-4033-b4e6-627bb2e221d4\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-x6m4t" Oct 02 10:54:37 crc kubenswrapper[4783]: I1002 10:54:37.031110 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/14efb254-184d-4033-b4e6-627bb2e221d4-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-x6m4t\" (UID: \"14efb254-184d-4033-b4e6-627bb2e221d4\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-x6m4t" Oct 02 10:54:37 crc kubenswrapper[4783]: I1002 10:54:37.031177 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/14efb254-184d-4033-b4e6-627bb2e221d4-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-x6m4t\" (UID: \"14efb254-184d-4033-b4e6-627bb2e221d4\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-x6m4t" Oct 02 10:54:37 crc kubenswrapper[4783]: I1002 10:54:37.031231 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/14efb254-184d-4033-b4e6-627bb2e221d4-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-x6m4t\" (UID: \"14efb254-184d-4033-b4e6-627bb2e221d4\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-x6m4t" Oct 02 10:54:37 crc kubenswrapper[4783]: I1002 10:54:37.031447 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/14efb254-184d-4033-b4e6-627bb2e221d4-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-x6m4t\" (UID: \"14efb254-184d-4033-b4e6-627bb2e221d4\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-x6m4t" Oct 02 10:54:37 crc kubenswrapper[4783]: I1002 10:54:37.042617 4783 pod_startup_latency_tracker.go:104] "Observed 
pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=87.042586601 podStartE2EDuration="1m27.042586601s" podCreationTimestamp="2025-10-02 10:53:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:54:37.041562794 +0000 UTC m=+110.357757085" watchObservedRunningTime="2025-10-02 10:54:37.042586601 +0000 UTC m=+110.358780892" Oct 02 10:54:37 crc kubenswrapper[4783]: I1002 10:54:37.088654 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=89.088624089 podStartE2EDuration="1m29.088624089s" podCreationTimestamp="2025-10-02 10:53:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:54:37.08671302 +0000 UTC m=+110.402907331" watchObservedRunningTime="2025-10-02 10:54:37.088624089 +0000 UTC m=+110.404818390" Oct 02 10:54:37 crc kubenswrapper[4783]: I1002 10:54:37.088928 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=89.088914437 podStartE2EDuration="1m29.088914437s" podCreationTimestamp="2025-10-02 10:53:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:54:37.066666652 +0000 UTC m=+110.382860953" watchObservedRunningTime="2025-10-02 10:54:37.088914437 +0000 UTC m=+110.405108778" Oct 02 10:54:37 crc kubenswrapper[4783]: I1002 10:54:37.132283 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/14efb254-184d-4033-b4e6-627bb2e221d4-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-x6m4t\" (UID: \"14efb254-184d-4033-b4e6-627bb2e221d4\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-x6m4t" Oct 02 10:54:37 crc kubenswrapper[4783]: I1002 10:54:37.132339 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/14efb254-184d-4033-b4e6-627bb2e221d4-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-x6m4t\" (UID: \"14efb254-184d-4033-b4e6-627bb2e221d4\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-x6m4t" Oct 02 10:54:37 crc kubenswrapper[4783]: I1002 10:54:37.132391 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/14efb254-184d-4033-b4e6-627bb2e221d4-service-ca\") pod \"cluster-version-operator-5c965bbfc6-x6m4t\" (UID: \"14efb254-184d-4033-b4e6-627bb2e221d4\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-x6m4t" Oct 02 10:54:37 crc kubenswrapper[4783]: I1002 10:54:37.132461 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/14efb254-184d-4033-b4e6-627bb2e221d4-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-x6m4t\" (UID: \"14efb254-184d-4033-b4e6-627bb2e221d4\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-x6m4t" Oct 02 10:54:37 crc kubenswrapper[4783]: I1002 10:54:37.132499 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: 
\"kubernetes.io/host-path/14efb254-184d-4033-b4e6-627bb2e221d4-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-x6m4t\" (UID: \"14efb254-184d-4033-b4e6-627bb2e221d4\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-x6m4t" Oct 02 10:54:37 crc kubenswrapper[4783]: I1002 10:54:37.132491 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/14efb254-184d-4033-b4e6-627bb2e221d4-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-x6m4t\" (UID: \"14efb254-184d-4033-b4e6-627bb2e221d4\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-x6m4t" Oct 02 10:54:37 crc kubenswrapper[4783]: I1002 10:54:37.132840 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/14efb254-184d-4033-b4e6-627bb2e221d4-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-x6m4t\" (UID: \"14efb254-184d-4033-b4e6-627bb2e221d4\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-x6m4t" Oct 02 10:54:37 crc kubenswrapper[4783]: I1002 10:54:37.133877 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/14efb254-184d-4033-b4e6-627bb2e221d4-service-ca\") pod \"cluster-version-operator-5c965bbfc6-x6m4t\" (UID: \"14efb254-184d-4033-b4e6-627bb2e221d4\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-x6m4t" Oct 02 10:54:37 crc kubenswrapper[4783]: I1002 10:54:37.138085 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/14efb254-184d-4033-b4e6-627bb2e221d4-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-x6m4t\" (UID: \"14efb254-184d-4033-b4e6-627bb2e221d4\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-x6m4t" Oct 02 10:54:37 crc kubenswrapper[4783]: I1002 10:54:37.140350 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podStartSLOduration=88.140327664 podStartE2EDuration="1m28.140327664s" podCreationTimestamp="2025-10-02 10:53:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:54:37.123646163 +0000 UTC m=+110.439840454" watchObservedRunningTime="2025-10-02 10:54:37.140327664 +0000 UTC m=+110.456521935" Oct 02 10:54:37 crc kubenswrapper[4783]: I1002 10:54:37.160510 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/14efb254-184d-4033-b4e6-627bb2e221d4-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-x6m4t\" (UID: \"14efb254-184d-4033-b4e6-627bb2e221d4\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-x6m4t" Oct 02 10:54:37 crc kubenswrapper[4783]: I1002 10:54:37.161114 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-vxpvq" podStartSLOduration=88.16109452 podStartE2EDuration="1m28.16109452s" podCreationTimestamp="2025-10-02 10:53:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:54:37.140856258 +0000 UTC m=+110.457050529" watchObservedRunningTime="2025-10-02 10:54:37.16109452 +0000 UTC m=+110.477288791" Oct 02 10:54:37 crc 
kubenswrapper[4783]: I1002 10:54:37.161834 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=55.161825269 podStartE2EDuration="55.161825269s" podCreationTimestamp="2025-10-02 10:53:42 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:54:37.161294995 +0000 UTC m=+110.477489266" watchObservedRunningTime="2025-10-02 10:54:37.161825269 +0000 UTC m=+110.478019540" Oct 02 10:54:37 crc kubenswrapper[4783]: I1002 10:54:37.238540 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-wmn4g" podStartSLOduration=88.238522869 podStartE2EDuration="1m28.238522869s" podCreationTimestamp="2025-10-02 10:53:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:54:37.225276977 +0000 UTC m=+110.541471248" watchObservedRunningTime="2025-10-02 10:54:37.238522869 +0000 UTC m=+110.554717130" Oct 02 10:54:37 crc kubenswrapper[4783]: I1002 10:54:37.238663 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-ks7tf" podStartSLOduration=88.238659373 podStartE2EDuration="1m28.238659373s" podCreationTimestamp="2025-10-02 10:53:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:54:37.238394966 +0000 UTC m=+110.554589237" watchObservedRunningTime="2025-10-02 10:54:37.238659373 +0000 UTC m=+110.554853634" Oct 02 10:54:37 crc kubenswrapper[4783]: I1002 10:54:37.288435 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-jqvp2" podStartSLOduration=88.288399387 podStartE2EDuration="1m28.288399387s" podCreationTimestamp="2025-10-02 10:53:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:54:37.278805919 +0000 UTC m=+110.595000170" watchObservedRunningTime="2025-10-02 10:54:37.288399387 +0000 UTC m=+110.604593648" Oct 02 10:54:37 crc kubenswrapper[4783]: I1002 10:54:37.301140 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=39.301118655 podStartE2EDuration="39.301118655s" podCreationTimestamp="2025-10-02 10:53:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:54:37.300965371 +0000 UTC m=+110.617159632" watchObservedRunningTime="2025-10-02 10:54:37.301118655 +0000 UTC m=+110.617312926" Oct 02 10:54:37 crc kubenswrapper[4783]: I1002 10:54:37.314964 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-x6m4t" Oct 02 10:54:37 crc kubenswrapper[4783]: I1002 10:54:37.379890 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-2q7k8" podStartSLOduration=87.379871968 podStartE2EDuration="1m27.379871968s" podCreationTimestamp="2025-10-02 10:53:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:54:37.369207593 +0000 UTC m=+110.685401864" watchObservedRunningTime="2025-10-02 10:54:37.379871968 +0000 UTC m=+110.696066229" Oct 02 10:54:38 crc kubenswrapper[4783]: I1002 10:54:38.168926 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-x6m4t" event={"ID":"14efb254-184d-4033-b4e6-627bb2e221d4","Type":"ContainerStarted","Data":"96cf1994aed22c6f73f0377d0d89033225d837fbdb6eebd48e77ae0e00fe5bfb"} Oct 02 10:54:38 crc kubenswrapper[4783]: I1002 10:54:38.169009 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-x6m4t" event={"ID":"14efb254-184d-4033-b4e6-627bb2e221d4","Type":"ContainerStarted","Data":"698f6c503d1a47b3fb2051b19aff07a25c9f73b6c334d1d86ed50db6627faaba"} Oct 02 10:54:38 crc kubenswrapper[4783]: I1002 10:54:38.184144 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-x6m4t" podStartSLOduration=89.184122671 podStartE2EDuration="1m29.184122671s" podCreationTimestamp="2025-10-02 10:53:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:54:38.183580367 +0000 UTC m=+111.499774688" watchObservedRunningTime="2025-10-02 10:54:38.184122671 +0000 UTC m=+111.500316932" Oct 02 10:54:38 crc kubenswrapper[4783]: I1002 10:54:38.545367 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6qbg4" Oct 02 10:54:38 crc kubenswrapper[4783]: I1002 10:54:38.545463 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 02 10:54:38 crc kubenswrapper[4783]: I1002 10:54:38.545469 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:54:38 crc kubenswrapper[4783]: I1002 10:54:38.546001 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 02 10:54:38 crc kubenswrapper[4783]: E1002 10:54:38.546143 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-6qbg4" podUID="f05f5bf0-b0a7-453b-999b-8ef23ca6cc68" Oct 02 10:54:38 crc kubenswrapper[4783]: E1002 10:54:38.546340 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 02 10:54:38 crc kubenswrapper[4783]: E1002 10:54:38.546386 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 02 10:54:38 crc kubenswrapper[4783]: E1002 10:54:38.546468 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 02 10:54:40 crc kubenswrapper[4783]: I1002 10:54:40.544748 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:54:40 crc kubenswrapper[4783]: I1002 10:54:40.544815 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6qbg4" Oct 02 10:54:40 crc kubenswrapper[4783]: I1002 10:54:40.544765 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 02 10:54:40 crc kubenswrapper[4783]: I1002 10:54:40.544763 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 02 10:54:40 crc kubenswrapper[4783]: E1002 10:54:40.544883 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 02 10:54:40 crc kubenswrapper[4783]: E1002 10:54:40.545118 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 02 10:54:40 crc kubenswrapper[4783]: E1002 10:54:40.545326 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 02 10:54:40 crc kubenswrapper[4783]: E1002 10:54:40.545455 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6qbg4" podUID="f05f5bf0-b0a7-453b-999b-8ef23ca6cc68" Oct 02 10:54:41 crc kubenswrapper[4783]: I1002 10:54:41.545380 4783 scope.go:117] "RemoveContainer" containerID="28a2dfa47c9574c9dbe9d258a082c63f6430fc57d7bf234a4453042d2026460a" Oct 02 10:54:41 crc kubenswrapper[4783]: E1002 10:54:41.545650 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-qmd84_openshift-ovn-kubernetes(095cdcdf-1ea0-40da-871a-1223c6737377)\"" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" Oct 02 10:54:42 crc kubenswrapper[4783]: I1002 10:54:42.544051 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 02 10:54:42 crc kubenswrapper[4783]: I1002 10:54:42.544072 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:54:42 crc kubenswrapper[4783]: I1002 10:54:42.544153 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6qbg4" Oct 02 10:54:42 crc kubenswrapper[4783]: E1002 10:54:42.544319 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6qbg4" podUID="f05f5bf0-b0a7-453b-999b-8ef23ca6cc68" Oct 02 10:54:42 crc kubenswrapper[4783]: E1002 10:54:42.544472 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 02 10:54:42 crc kubenswrapper[4783]: E1002 10:54:42.544574 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 02 10:54:42 crc kubenswrapper[4783]: I1002 10:54:42.544603 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 02 10:54:42 crc kubenswrapper[4783]: E1002 10:54:42.544755 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 02 10:54:44 crc kubenswrapper[4783]: I1002 10:54:44.190184 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-wmn4g_f6c8d5bc-163f-401f-bdc5-4625112dced9/kube-multus/1.log" Oct 02 10:54:44 crc kubenswrapper[4783]: I1002 10:54:44.190762 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-wmn4g_f6c8d5bc-163f-401f-bdc5-4625112dced9/kube-multus/0.log" Oct 02 10:54:44 crc kubenswrapper[4783]: I1002 10:54:44.190805 4783 generic.go:334] "Generic (PLEG): container finished" podID="f6c8d5bc-163f-401f-bdc5-4625112dced9" containerID="b082bad7fea22dc168e5afb29973bf16e160c19b67918df47835b6ffa8ce7d1b" exitCode=1 Oct 02 10:54:44 crc kubenswrapper[4783]: I1002 10:54:44.190845 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-wmn4g" event={"ID":"f6c8d5bc-163f-401f-bdc5-4625112dced9","Type":"ContainerDied","Data":"b082bad7fea22dc168e5afb29973bf16e160c19b67918df47835b6ffa8ce7d1b"} Oct 02 10:54:44 crc kubenswrapper[4783]: I1002 10:54:44.190886 4783 scope.go:117] "RemoveContainer" containerID="66ec2f190b5004026dfc36e595ca483d08c1dc07796317d28f9dfd252e391fe7" Oct 02 10:54:44 crc kubenswrapper[4783]: I1002 10:54:44.192150 4783 scope.go:117] "RemoveContainer" containerID="b082bad7fea22dc168e5afb29973bf16e160c19b67918df47835b6ffa8ce7d1b" Oct 02 10:54:44 crc kubenswrapper[4783]: E1002 10:54:44.192534 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-wmn4g_openshift-multus(f6c8d5bc-163f-401f-bdc5-4625112dced9)\"" pod="openshift-multus/multus-wmn4g" podUID="f6c8d5bc-163f-401f-bdc5-4625112dced9" Oct 02 10:54:44 crc kubenswrapper[4783]: I1002 10:54:44.544473 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6qbg4" Oct 02 10:54:44 crc kubenswrapper[4783]: I1002 10:54:44.544515 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 02 10:54:44 crc kubenswrapper[4783]: I1002 10:54:44.544539 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 02 10:54:44 crc kubenswrapper[4783]: E1002 10:54:44.544622 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6qbg4" podUID="f05f5bf0-b0a7-453b-999b-8ef23ca6cc68" Oct 02 10:54:44 crc kubenswrapper[4783]: I1002 10:54:44.544664 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:54:44 crc kubenswrapper[4783]: E1002 10:54:44.544839 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 02 10:54:44 crc kubenswrapper[4783]: E1002 10:54:44.544966 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 02 10:54:44 crc kubenswrapper[4783]: E1002 10:54:44.545071 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 02 10:54:45 crc kubenswrapper[4783]: I1002 10:54:45.195285 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-wmn4g_f6c8d5bc-163f-401f-bdc5-4625112dced9/kube-multus/1.log" Oct 02 10:54:46 crc kubenswrapper[4783]: I1002 10:54:46.543796 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6qbg4" Oct 02 10:54:46 crc kubenswrapper[4783]: I1002 10:54:46.543907 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 02 10:54:46 crc kubenswrapper[4783]: I1002 10:54:46.544163 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:54:46 crc kubenswrapper[4783]: I1002 10:54:46.544206 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 02 10:54:46 crc kubenswrapper[4783]: E1002 10:54:46.544500 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 02 10:54:46 crc kubenswrapper[4783]: E1002 10:54:46.544544 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 02 10:54:46 crc kubenswrapper[4783]: E1002 10:54:46.544738 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6qbg4" podUID="f05f5bf0-b0a7-453b-999b-8ef23ca6cc68" Oct 02 10:54:46 crc kubenswrapper[4783]: E1002 10:54:46.544889 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 02 10:54:47 crc kubenswrapper[4783]: E1002 10:54:47.524166 4783 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Oct 02 10:54:47 crc kubenswrapper[4783]: E1002 10:54:47.673355 4783 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Oct 02 10:54:48 crc kubenswrapper[4783]: I1002 10:54:48.544489 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6qbg4" Oct 02 10:54:48 crc kubenswrapper[4783]: I1002 10:54:48.544536 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 02 10:54:48 crc kubenswrapper[4783]: I1002 10:54:48.544536 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:54:48 crc kubenswrapper[4783]: E1002 10:54:48.544678 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-6qbg4" podUID="f05f5bf0-b0a7-453b-999b-8ef23ca6cc68" Oct 02 10:54:48 crc kubenswrapper[4783]: I1002 10:54:48.544764 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 02 10:54:48 crc kubenswrapper[4783]: E1002 10:54:48.544944 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 02 10:54:48 crc kubenswrapper[4783]: E1002 10:54:48.545074 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 02 10:54:48 crc kubenswrapper[4783]: E1002 10:54:48.545134 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 02 10:54:50 crc kubenswrapper[4783]: I1002 10:54:50.544325 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 02 10:54:50 crc kubenswrapper[4783]: I1002 10:54:50.544376 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:54:50 crc kubenswrapper[4783]: E1002 10:54:50.544493 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 02 10:54:50 crc kubenswrapper[4783]: I1002 10:54:50.544404 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6qbg4" Oct 02 10:54:50 crc kubenswrapper[4783]: I1002 10:54:50.544383 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 02 10:54:50 crc kubenswrapper[4783]: E1002 10:54:50.544636 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 02 10:54:50 crc kubenswrapper[4783]: E1002 10:54:50.544693 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6qbg4" podUID="f05f5bf0-b0a7-453b-999b-8ef23ca6cc68" Oct 02 10:54:50 crc kubenswrapper[4783]: E1002 10:54:50.544815 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 02 10:54:52 crc kubenswrapper[4783]: I1002 10:54:52.544645 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 02 10:54:52 crc kubenswrapper[4783]: E1002 10:54:52.545190 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 02 10:54:52 crc kubenswrapper[4783]: I1002 10:54:52.544682 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:54:52 crc kubenswrapper[4783]: E1002 10:54:52.545252 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 02 10:54:52 crc kubenswrapper[4783]: I1002 10:54:52.544645 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6qbg4" Oct 02 10:54:52 crc kubenswrapper[4783]: E1002 10:54:52.545299 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6qbg4" podUID="f05f5bf0-b0a7-453b-999b-8ef23ca6cc68" Oct 02 10:54:52 crc kubenswrapper[4783]: I1002 10:54:52.544665 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 02 10:54:52 crc kubenswrapper[4783]: E1002 10:54:52.545342 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 02 10:54:52 crc kubenswrapper[4783]: E1002 10:54:52.674445 4783 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Oct 02 10:54:54 crc kubenswrapper[4783]: I1002 10:54:54.544214 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 02 10:54:54 crc kubenswrapper[4783]: I1002 10:54:54.544250 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:54:54 crc kubenswrapper[4783]: I1002 10:54:54.544332 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6qbg4" Oct 02 10:54:54 crc kubenswrapper[4783]: I1002 10:54:54.544483 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 02 10:54:54 crc kubenswrapper[4783]: E1002 10:54:54.544506 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 02 10:54:54 crc kubenswrapper[4783]: E1002 10:54:54.544635 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 02 10:54:54 crc kubenswrapper[4783]: E1002 10:54:54.544737 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6qbg4" podUID="f05f5bf0-b0a7-453b-999b-8ef23ca6cc68" Oct 02 10:54:54 crc kubenswrapper[4783]: E1002 10:54:54.544875 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 02 10:54:56 crc kubenswrapper[4783]: I1002 10:54:56.544843 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:54:56 crc kubenswrapper[4783]: I1002 10:54:56.544876 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 02 10:54:56 crc kubenswrapper[4783]: I1002 10:54:56.544942 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 02 10:54:56 crc kubenswrapper[4783]: E1002 10:54:56.545168 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Oct 02 10:54:56 crc kubenswrapper[4783]: I1002 10:54:56.545255 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6qbg4" Oct 02 10:54:56 crc kubenswrapper[4783]: E1002 10:54:56.545548 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6qbg4" podUID="f05f5bf0-b0a7-453b-999b-8ef23ca6cc68" Oct 02 10:54:56 crc kubenswrapper[4783]: E1002 10:54:56.545733 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Oct 02 10:54:56 crc kubenswrapper[4783]: E1002 10:54:56.545825 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Oct 02 10:54:56 crc kubenswrapper[4783]: I1002 10:54:56.546388 4783 scope.go:117] "RemoveContainer" containerID="b082bad7fea22dc168e5afb29973bf16e160c19b67918df47835b6ffa8ce7d1b" Oct 02 10:54:56 crc kubenswrapper[4783]: I1002 10:54:56.546534 4783 scope.go:117] "RemoveContainer" containerID="28a2dfa47c9574c9dbe9d258a082c63f6430fc57d7bf234a4453042d2026460a" Oct 02 10:54:57 crc kubenswrapper[4783]: I1002 10:54:57.241660 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qmd84_095cdcdf-1ea0-40da-871a-1223c6737377/ovnkube-controller/3.log" Oct 02 10:54:57 crc kubenswrapper[4783]: I1002 10:54:57.247448 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" event={"ID":"095cdcdf-1ea0-40da-871a-1223c6737377","Type":"ContainerStarted","Data":"6b3cc167cd7b063c3bc27babf6fb353fa69b70a0594d01dae62b30d182f97ad5"} Oct 02 10:54:57 crc kubenswrapper[4783]: I1002 10:54:57.249012 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 10:54:57 crc kubenswrapper[4783]: I1002 10:54:57.252012 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-wmn4g_f6c8d5bc-163f-401f-bdc5-4625112dced9/kube-multus/1.log" Oct 02 10:54:57 crc kubenswrapper[4783]: I1002 10:54:57.252064 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-wmn4g" event={"ID":"f6c8d5bc-163f-401f-bdc5-4625112dced9","Type":"ContainerStarted","Data":"5e9a142014f6188db79c661039a5fb0036ea5e97daddbfbe9d12633dbdba8daa"} Oct 02 10:54:57 crc kubenswrapper[4783]: I1002 10:54:57.286234 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" podStartSLOduration=107.286209938 podStartE2EDuration="1m47.286209938s" podCreationTimestamp="2025-10-02 10:53:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:54:57.285010647 +0000 UTC m=+130.601204908" watchObservedRunningTime="2025-10-02 10:54:57.286209938 +0000 UTC m=+130.602404209" Oct 02 10:54:57 crc kubenswrapper[4783]: I1002 10:54:57.653951 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-6qbg4"] Oct 02 10:54:57 crc kubenswrapper[4783]: I1002 10:54:57.654055 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6qbg4" Oct 02 10:54:57 crc kubenswrapper[4783]: E1002 10:54:57.654132 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6qbg4" podUID="f05f5bf0-b0a7-453b-999b-8ef23ca6cc68" Oct 02 10:54:57 crc kubenswrapper[4783]: E1002 10:54:57.676215 4783 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
Oct 02 10:54:58 crc kubenswrapper[4783]: I1002 10:54:58.544255 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 02 10:54:58 crc kubenswrapper[4783]: I1002 10:54:58.544364 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 02 10:54:58 crc kubenswrapper[4783]: E1002 10:54:58.544483 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 02 10:54:58 crc kubenswrapper[4783]: I1002 10:54:58.544582 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 02 10:54:58 crc kubenswrapper[4783]: E1002 10:54:58.544755 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 02 10:54:58 crc kubenswrapper[4783]: E1002 10:54:58.544849 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 02 10:54:59 crc kubenswrapper[4783]: I1002 10:54:59.544168 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6qbg4"
Oct 02 10:54:59 crc kubenswrapper[4783]: E1002 10:54:59.544370 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6qbg4" podUID="f05f5bf0-b0a7-453b-999b-8ef23ca6cc68"
Oct 02 10:55:00 crc kubenswrapper[4783]: I1002 10:55:00.544398 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 02 10:55:00 crc kubenswrapper[4783]: I1002 10:55:00.544504 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 02 10:55:00 crc kubenswrapper[4783]: I1002 10:55:00.544444 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 02 10:55:00 crc kubenswrapper[4783]: E1002 10:55:00.544629 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 02 10:55:00 crc kubenswrapper[4783]: E1002 10:55:00.544746 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 02 10:55:00 crc kubenswrapper[4783]: E1002 10:55:00.544881 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 02 10:55:01 crc kubenswrapper[4783]: I1002 10:55:01.544282 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6qbg4"
Oct 02 10:55:01 crc kubenswrapper[4783]: E1002 10:55:01.544498 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-6qbg4" podUID="f05f5bf0-b0a7-453b-999b-8ef23ca6cc68"
Oct 02 10:55:02 crc kubenswrapper[4783]: I1002 10:55:02.561702 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 02 10:55:02 crc kubenswrapper[4783]: I1002 10:55:02.561804 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 02 10:55:02 crc kubenswrapper[4783]: I1002 10:55:02.561731 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 02 10:55:02 crc kubenswrapper[4783]: E1002 10:55:02.561910 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Oct 02 10:55:02 crc kubenswrapper[4783]: E1002 10:55:02.562131 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Oct 02 10:55:02 crc kubenswrapper[4783]: E1002 10:55:02.562272 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Oct 02 10:55:03 crc kubenswrapper[4783]: I1002 10:55:03.544724 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-6qbg4"
Oct 02 10:55:03 crc kubenswrapper[4783]: I1002 10:55:03.548146 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c"
Oct 02 10:55:03 crc kubenswrapper[4783]: I1002 10:55:03.548212 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret"
Oct 02 10:55:04 crc kubenswrapper[4783]: I1002 10:55:04.544798 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Oct 02 10:55:04 crc kubenswrapper[4783]: I1002 10:55:04.544831 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Oct 02 10:55:04 crc kubenswrapper[4783]: I1002 10:55:04.545579 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Oct 02 10:55:04 crc kubenswrapper[4783]: I1002 10:55:04.548452 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt"
Oct 02 10:55:04 crc kubenswrapper[4783]: I1002 10:55:04.548488 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin"
Oct 02 10:55:04 crc kubenswrapper[4783]: I1002 10:55:04.548982 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert"
Oct 02 10:55:04 crc kubenswrapper[4783]: I1002 10:55:04.551059 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt"
Oct 02 10:55:05 crc kubenswrapper[4783]: I1002 10:55:05.511766 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84"
Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.819754 4783 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady"
Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.877053 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-9d7f5"]
Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.877634 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-9d7f5"
Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.878921 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-zbtp8"]
Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.879333 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zbtp8"
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zbtp8" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.889327 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.889839 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.889865 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.889932 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.890036 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.890343 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.890667 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.890765 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.893006 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-hw769"] Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.893785 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-hw769" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.895297 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.897556 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.897693 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.897923 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-xlxdx"] Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.898803 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-xlxdx" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.901198 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-4cnvp"] Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.905000 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.916356 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.918342 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-dzxd5"] Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.919053 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-dzxd5" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.919663 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.919884 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.920026 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.920170 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.920311 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.920474 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.920838 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.920980 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.922547 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.922751 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.922887 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.923727 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.937459 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.937684 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.938027 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.938171 4783 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-authentication"/"v4-0-config-user-template-error" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.938273 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.938438 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.940195 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-x9scv"] Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.940579 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-ql8ns"] Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.940864 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-x2ljx"] Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.941320 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-x2ljx" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.941388 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.941661 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-x9scv" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.942011 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-ql8ns" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.942316 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.942474 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.942623 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.942743 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.942935 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.943044 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.943107 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-vjcp4"] Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.943149 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.943237 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.943348 4783 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.949289 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.949554 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.949581 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.949718 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.950042 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.952284 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.953510 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-lmvhj"] Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.954163 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.955967 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-lmvhj" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.959474 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.959839 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-vjcp4" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.972951 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.973894 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.974850 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-r7v8v"] Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.975297 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-r7v8v" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.981377 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-r6pqv"] Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.981835 4783 util.go:30] "No sandbox for pod can be found. 
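The long run of reflector.go "Caches populated" entries records the kubelet's informer caches filling, per namespace and object, for the Secrets and ConfigMaps the newly added pods mount. The same list-then-watch mechanism is available to any client-go consumer; here is a hedged sketch of syncing a ConfigMap informer cache using the standard client-go API, where details such as the kubeconfig path are placeholders:

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Placeholder kubeconfig path; inside a cluster one would use rest.InClusterConfig().
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	factory := informers.NewSharedInformerFactory(client, 30*time.Second)
	cmInformer := factory.Core().V1().ConfigMaps().Informer()

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)

	// Blocks until the reflector has listed the objects and populated the
	// local cache -- the moment the "Caches populated" entries correspond to.
	if !cache.WaitForCacheSync(stop, cmInformer.HasSynced) {
		panic("cache never synced")
	}
	fmt.Println("ConfigMap cache populated")
}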
Need to start a new one" pod="openshift-console/downloads-7954f5f757-r6pqv" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.981989 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-cm245"] Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.982441 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-cm245" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.983510 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-drj5j"] Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.983784 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-drj5j" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.988082 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.988216 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-shp99"] Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.988662 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.988858 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-xknld"] Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.989167 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-xknld" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.989495 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.990586 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-xgcwj"] Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.992843 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.993007 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.993180 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.994099 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/50389666-bf4f-4442-a4cd-f3609994ce1b-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-xlxdx\" (UID: \"50389666-bf4f-4442-a4cd-f3609994ce1b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-xlxdx" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.994129 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fm8hm\" (UniqueName: \"kubernetes.io/projected/20842d7f-97fb-42e7-80ff-f51a26a55970-kube-api-access-fm8hm\") pod \"openshift-apiserver-operator-796bbdcf4f-9d7f5\" (UID: \"20842d7f-97fb-42e7-80ff-f51a26a55970\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-9d7f5" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.994151 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/01de2d55-d330-461b-b801-0bfe3078baab-audit-policies\") pod \"oauth-openshift-558db77b4-4cnvp\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.994167 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vgspg\" (UniqueName: \"kubernetes.io/projected/01de2d55-d330-461b-b801-0bfe3078baab-kube-api-access-vgspg\") pod \"oauth-openshift-558db77b4-4cnvp\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.994181 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a1ba5a71-7a04-4446-8459-1748799af4db-audit-dir\") pod \"apiserver-7bbb656c7d-hw769\" (UID: \"a1ba5a71-7a04-4446-8459-1748799af4db\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-hw769" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.994194 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/50389666-bf4f-4442-a4cd-f3609994ce1b-config\") pod \"machine-api-operator-5694c8668f-xlxdx\" (UID: 
\"50389666-bf4f-4442-a4cd-f3609994ce1b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-xlxdx" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.994210 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a2328b20-92ab-42c6-a089-89ec4e1c4ffb-client-ca\") pod \"route-controller-manager-6576b87f9c-zbtp8\" (UID: \"a2328b20-92ab-42c6-a089-89ec4e1c4ffb\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zbtp8" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.994226 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/a1ba5a71-7a04-4446-8459-1748799af4db-etcd-client\") pod \"apiserver-7bbb656c7d-hw769\" (UID: \"a1ba5a71-7a04-4446-8459-1748799af4db\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-hw769" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.994244 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a2328b20-92ab-42c6-a089-89ec4e1c4ffb-serving-cert\") pod \"route-controller-manager-6576b87f9c-zbtp8\" (UID: \"a2328b20-92ab-42c6-a089-89ec4e1c4ffb\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zbtp8" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.994265 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/50389666-bf4f-4442-a4cd-f3609994ce1b-images\") pod \"machine-api-operator-5694c8668f-xlxdx\" (UID: \"50389666-bf4f-4442-a4cd-f3609994ce1b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-xlxdx" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.994282 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-4cnvp\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.994296 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-4cnvp\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.994312 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-4cnvp\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.994328 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/a1ba5a71-7a04-4446-8459-1748799af4db-etcd-serving-ca\") pod 
\"apiserver-7bbb656c7d-hw769\" (UID: \"a1ba5a71-7a04-4446-8459-1748799af4db\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-hw769" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.994342 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-4cnvp\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.994358 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-4cnvp\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.994373 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-4cnvp\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.994390 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/a1ba5a71-7a04-4446-8459-1748799af4db-encryption-config\") pod \"apiserver-7bbb656c7d-hw769\" (UID: \"a1ba5a71-7a04-4446-8459-1748799af4db\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-hw769" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.994404 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-4cnvp\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.994454 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-4cnvp\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.994475 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hppqz\" (UniqueName: \"kubernetes.io/projected/50389666-bf4f-4442-a4cd-f3609994ce1b-kube-api-access-hppqz\") pod \"machine-api-operator-5694c8668f-xlxdx\" (UID: \"50389666-bf4f-4442-a4cd-f3609994ce1b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-xlxdx" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.994491 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/a2328b20-92ab-42c6-a089-89ec4e1c4ffb-config\") pod \"route-controller-manager-6576b87f9c-zbtp8\" (UID: \"a2328b20-92ab-42c6-a089-89ec4e1c4ffb\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zbtp8" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.994536 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f2pbd\" (UniqueName: \"kubernetes.io/projected/a2328b20-92ab-42c6-a089-89ec4e1c4ffb-kube-api-access-f2pbd\") pod \"route-controller-manager-6576b87f9c-zbtp8\" (UID: \"a2328b20-92ab-42c6-a089-89ec4e1c4ffb\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zbtp8" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.994557 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a1ba5a71-7a04-4446-8459-1748799af4db-serving-cert\") pod \"apiserver-7bbb656c7d-hw769\" (UID: \"a1ba5a71-7a04-4446-8459-1748799af4db\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-hw769" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.994582 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-4cnvp\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.994598 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-4cnvp\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.994612 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a1ba5a71-7a04-4446-8459-1748799af4db-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-hw769\" (UID: \"a1ba5a71-7a04-4446-8459-1748799af4db\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-hw769" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.994626 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9jtxl\" (UniqueName: \"kubernetes.io/projected/a1ba5a71-7a04-4446-8459-1748799af4db-kube-api-access-9jtxl\") pod \"apiserver-7bbb656c7d-hw769\" (UID: \"a1ba5a71-7a04-4446-8459-1748799af4db\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-hw769" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.994640 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a1ba5a71-7a04-4446-8459-1748799af4db-audit-policies\") pod \"apiserver-7bbb656c7d-hw769\" (UID: \"a1ba5a71-7a04-4446-8459-1748799af4db\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-hw769" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.994657 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/20842d7f-97fb-42e7-80ff-f51a26a55970-config\") pod \"openshift-apiserver-operator-796bbdcf4f-9d7f5\" (UID: \"20842d7f-97fb-42e7-80ff-f51a26a55970\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-9d7f5" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.994672 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/01de2d55-d330-461b-b801-0bfe3078baab-audit-dir\") pod \"oauth-openshift-558db77b4-4cnvp\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.994686 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-4cnvp\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.994702 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/20842d7f-97fb-42e7-80ff-f51a26a55970-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-9d7f5\" (UID: \"20842d7f-97fb-42e7-80ff-f51a26a55970\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-9d7f5" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.995074 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.996993 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.997337 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Oct 02 10:55:07 crc kubenswrapper[4783]: I1002 10:55:07.999833 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.000058 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.000814 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.001041 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.001206 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.001378 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.001514 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Oct 02 10:55:08 
crc kubenswrapper[4783]: I1002 10:55:08.001636 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.002328 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mktxk"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.002655 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-zwxct"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.002886 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xgcwj" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.002958 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mktxk" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.002931 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-zwxct" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.002893 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8wv52"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.003813 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-9hxcf"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.004216 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-9hj2c"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.004534 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-d2npj"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.004930 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-x4fgp"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.005311 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-t4hbs"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.006119 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8wv52" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.006322 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-9hxcf" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.006384 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-9hj2c" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.006922 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.006950 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-d2npj" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.007182 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.007293 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.007403 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.007499 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.007574 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.007681 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.007778 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.007985 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.008102 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.008200 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.008500 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.008772 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.008855 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.009006 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.027900 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.028435 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.028765 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-6rrvt"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.034751 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.035345 4783 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-x4fgp" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.036118 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.041718 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-t4hbs" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.042607 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.042734 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncpvc"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.045023 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.045023 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.045383 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.045549 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.045935 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6rrvt" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.053460 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-8544f"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.053742 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.053949 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.054070 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.054232 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.072050 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.072251 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncpvc" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.072823 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-js7h8"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.073183 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.073217 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.073340 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-js7h8" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.073477 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-8544f" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.073832 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.074100 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.074336 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.075240 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-8mjk7"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.075717 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.075825 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rfpcx"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.075874 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-8mjk7" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.078585 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-2w77p"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.079204 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29323365-465ld"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.079759 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29323365-465ld" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.080134 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rfpcx" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.080397 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-2w77p" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.083379 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.084762 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.087492 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.087513 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.087648 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.089649 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.094229 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7qsc9"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.094959 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-q7g9p"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.095399 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-q7g9p" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.095564 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/20842d7f-97fb-42e7-80ff-f51a26a55970-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-9d7f5\" (UID: \"20842d7f-97fb-42e7-80ff-f51a26a55970\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-9d7f5" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.095602 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/032450eb-6d0d-4cc2-90ee-c1ae7228f735-etcd-client\") pod \"etcd-operator-b45778765-9hj2c\" (UID: \"032450eb-6d0d-4cc2-90ee-c1ae7228f735\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9hj2c" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.095620 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/feedb992-610e-4ceb-84f1-7d5a005d7826-serving-cert\") pod \"apiserver-76f77b778f-dzxd5\" (UID: \"feedb992-610e-4ceb-84f1-7d5a005d7826\") " pod="openshift-apiserver/apiserver-76f77b778f-dzxd5" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.095637 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7-trusted-ca-bundle\") pod \"console-f9d7485db-vjcp4\" (UID: \"ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7\") " pod="openshift-console/console-f9d7485db-vjcp4" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 
10:55:08.095654 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/0ec0d7d6-9f6a-43cb-984b-4e162c07da70-metrics-tls\") pod \"dns-operator-744455d44c-9hxcf\" (UID: \"0ec0d7d6-9f6a-43cb-984b-4e162c07da70\") " pod="openshift-dns-operator/dns-operator-744455d44c-9hxcf" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.095669 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/032450eb-6d0d-4cc2-90ee-c1ae7228f735-serving-cert\") pod \"etcd-operator-b45778765-9hj2c\" (UID: \"032450eb-6d0d-4cc2-90ee-c1ae7228f735\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9hj2c" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.095685 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/12e7e4aa-75cb-41c1-8d03-8eea90096e8c-serving-cert\") pod \"console-operator-58897d9998-x9scv\" (UID: \"12e7e4aa-75cb-41c1-8d03-8eea90096e8c\") " pod="openshift-console-operator/console-operator-58897d9998-x9scv" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.095706 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/feedb992-610e-4ceb-84f1-7d5a005d7826-audit-dir\") pod \"apiserver-76f77b778f-dzxd5\" (UID: \"feedb992-610e-4ceb-84f1-7d5a005d7826\") " pod="openshift-apiserver/apiserver-76f77b778f-dzxd5" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.095726 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/12e7e4aa-75cb-41c1-8d03-8eea90096e8c-trusted-ca\") pod \"console-operator-58897d9998-x9scv\" (UID: \"12e7e4aa-75cb-41c1-8d03-8eea90096e8c\") " pod="openshift-console-operator/console-operator-58897d9998-x9scv" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.095742 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/6670b373-8502-4db3-9f27-461eca66f043-proxy-tls\") pod \"machine-config-operator-74547568cd-d2npj\" (UID: \"6670b373-8502-4db3-9f27-461eca66f043\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-d2npj" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.095757 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/5b90e90f-8fb3-440c-b0d3-20f9582764ea-machine-approver-tls\") pod \"machine-approver-56656f9798-x2ljx\" (UID: \"5b90e90f-8fb3-440c-b0d3-20f9582764ea\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-x2ljx" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.095783 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/50389666-bf4f-4442-a4cd-f3609994ce1b-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-xlxdx\" (UID: \"50389666-bf4f-4442-a4cd-f3609994ce1b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-xlxdx" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.095799 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fm8hm\" 
(UniqueName: \"kubernetes.io/projected/20842d7f-97fb-42e7-80ff-f51a26a55970-kube-api-access-fm8hm\") pod \"openshift-apiserver-operator-796bbdcf4f-9d7f5\" (UID: \"20842d7f-97fb-42e7-80ff-f51a26a55970\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-9d7f5" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.095817 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/feedb992-610e-4ceb-84f1-7d5a005d7826-encryption-config\") pod \"apiserver-76f77b778f-dzxd5\" (UID: \"feedb992-610e-4ceb-84f1-7d5a005d7826\") " pod="openshift-apiserver/apiserver-76f77b778f-dzxd5" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.095835 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/01de2d55-d330-461b-b801-0bfe3078baab-audit-policies\") pod \"oauth-openshift-558db77b4-4cnvp\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.095850 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vgspg\" (UniqueName: \"kubernetes.io/projected/01de2d55-d330-461b-b801-0bfe3078baab-kube-api-access-vgspg\") pod \"oauth-openshift-558db77b4-4cnvp\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.095867 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7-console-oauth-config\") pod \"console-f9d7485db-vjcp4\" (UID: \"ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7\") " pod="openshift-console/console-f9d7485db-vjcp4" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.095882 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7-oauth-serving-cert\") pod \"console-f9d7485db-vjcp4\" (UID: \"ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7\") " pod="openshift-console/console-f9d7485db-vjcp4" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.095898 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/6670b373-8502-4db3-9f27-461eca66f043-auth-proxy-config\") pod \"machine-config-operator-74547568cd-d2npj\" (UID: \"6670b373-8502-4db3-9f27-461eca66f043\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-d2npj" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.095912 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/feedb992-610e-4ceb-84f1-7d5a005d7826-etcd-client\") pod \"apiserver-76f77b778f-dzxd5\" (UID: \"feedb992-610e-4ceb-84f1-7d5a005d7826\") " pod="openshift-apiserver/apiserver-76f77b778f-dzxd5" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.095928 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a1ba5a71-7a04-4446-8459-1748799af4db-audit-dir\") pod \"apiserver-7bbb656c7d-hw769\" (UID: 
\"a1ba5a71-7a04-4446-8459-1748799af4db\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-hw769" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.095943 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/50389666-bf4f-4442-a4cd-f3609994ce1b-config\") pod \"machine-api-operator-5694c8668f-xlxdx\" (UID: \"50389666-bf4f-4442-a4cd-f3609994ce1b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-xlxdx" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.095958 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/feedb992-610e-4ceb-84f1-7d5a005d7826-image-import-ca\") pod \"apiserver-76f77b778f-dzxd5\" (UID: \"feedb992-610e-4ceb-84f1-7d5a005d7826\") " pod="openshift-apiserver/apiserver-76f77b778f-dzxd5" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.095975 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a2328b20-92ab-42c6-a089-89ec4e1c4ffb-client-ca\") pod \"route-controller-manager-6576b87f9c-zbtp8\" (UID: \"a2328b20-92ab-42c6-a089-89ec4e1c4ffb\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zbtp8" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.095994 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c84k4\" (UniqueName: \"kubernetes.io/projected/6670b373-8502-4db3-9f27-461eca66f043-kube-api-access-c84k4\") pod \"machine-config-operator-74547568cd-d2npj\" (UID: \"6670b373-8502-4db3-9f27-461eca66f043\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-d2npj" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.096018 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/a1ba5a71-7a04-4446-8459-1748799af4db-etcd-client\") pod \"apiserver-7bbb656c7d-hw769\" (UID: \"a1ba5a71-7a04-4446-8459-1748799af4db\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-hw769" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.096032 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a2328b20-92ab-42c6-a089-89ec4e1c4ffb-serving-cert\") pod \"route-controller-manager-6576b87f9c-zbtp8\" (UID: \"a2328b20-92ab-42c6-a089-89ec4e1c4ffb\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zbtp8" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.096049 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/032450eb-6d0d-4cc2-90ee-c1ae7228f735-etcd-ca\") pod \"etcd-operator-b45778765-9hj2c\" (UID: \"032450eb-6d0d-4cc2-90ee-c1ae7228f735\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9hj2c" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.096064 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/feedb992-610e-4ceb-84f1-7d5a005d7826-node-pullsecrets\") pod \"apiserver-76f77b778f-dzxd5\" (UID: \"feedb992-610e-4ceb-84f1-7d5a005d7826\") " pod="openshift-apiserver/apiserver-76f77b778f-dzxd5" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.096082 4783 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7zfcg\" (UniqueName: \"kubernetes.io/projected/e435d9c9-4a33-4c0d-bb2b-84aa5e988124-kube-api-access-7zfcg\") pod \"openshift-config-operator-7777fb866f-cm245\" (UID: \"e435d9c9-4a33-4c0d-bb2b-84aa5e988124\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-cm245" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.096097 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/feedb992-610e-4ceb-84f1-7d5a005d7826-audit\") pod \"apiserver-76f77b778f-dzxd5\" (UID: \"feedb992-610e-4ceb-84f1-7d5a005d7826\") " pod="openshift-apiserver/apiserver-76f77b778f-dzxd5" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.096114 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/5b90e90f-8fb3-440c-b0d3-20f9582764ea-auth-proxy-config\") pod \"machine-approver-56656f9798-x2ljx\" (UID: \"5b90e90f-8fb3-440c-b0d3-20f9582764ea\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-x2ljx" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.096131 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/50389666-bf4f-4442-a4cd-f3609994ce1b-images\") pod \"machine-api-operator-5694c8668f-xlxdx\" (UID: \"50389666-bf4f-4442-a4cd-f3609994ce1b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-xlxdx" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.096146 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cc27s\" (UniqueName: \"kubernetes.io/projected/12e7e4aa-75cb-41c1-8d03-8eea90096e8c-kube-api-access-cc27s\") pod \"console-operator-58897d9998-x9scv\" (UID: \"12e7e4aa-75cb-41c1-8d03-8eea90096e8c\") " pod="openshift-console-operator/console-operator-58897d9998-x9scv" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.096162 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-4cnvp\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.096183 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-4cnvp\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.096198 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7-console-config\") pod \"console-f9d7485db-vjcp4\" (UID: \"ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7\") " pod="openshift-console/console-f9d7485db-vjcp4" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.096215 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-4cnvp\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.096232 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/a1ba5a71-7a04-4446-8459-1748799af4db-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-hw769\" (UID: \"a1ba5a71-7a04-4446-8459-1748799af4db\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-hw769" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.096248 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-4cnvp\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.096265 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2j856\" (UniqueName: \"kubernetes.io/projected/5b90e90f-8fb3-440c-b0d3-20f9582764ea-kube-api-access-2j856\") pod \"machine-approver-56656f9798-x2ljx\" (UID: \"5b90e90f-8fb3-440c-b0d3-20f9582764ea\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-x2ljx" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.096281 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cfdcm\" (UniqueName: \"kubernetes.io/projected/0ec0d7d6-9f6a-43cb-984b-4e162c07da70-kube-api-access-cfdcm\") pod \"dns-operator-744455d44c-9hxcf\" (UID: \"0ec0d7d6-9f6a-43cb-984b-4e162c07da70\") " pod="openshift-dns-operator/dns-operator-744455d44c-9hxcf" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.096296 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6670b373-8502-4db3-9f27-461eca66f043-images\") pod \"machine-config-operator-74547568cd-d2npj\" (UID: \"6670b373-8502-4db3-9f27-461eca66f043\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-d2npj" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.096315 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e435d9c9-4a33-4c0d-bb2b-84aa5e988124-serving-cert\") pod \"openshift-config-operator-7777fb866f-cm245\" (UID: \"e435d9c9-4a33-4c0d-bb2b-84aa5e988124\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-cm245" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.096331 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/12e7e4aa-75cb-41c1-8d03-8eea90096e8c-config\") pod \"console-operator-58897d9998-x9scv\" (UID: \"12e7e4aa-75cb-41c1-8d03-8eea90096e8c\") " pod="openshift-console-operator/console-operator-58897d9998-x9scv" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.096771 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" 
(UniqueName: \"kubernetes.io/configmap/feedb992-610e-4ceb-84f1-7d5a005d7826-etcd-serving-ca\") pod \"apiserver-76f77b778f-dzxd5\" (UID: \"feedb992-610e-4ceb-84f1-7d5a005d7826\") " pod="openshift-apiserver/apiserver-76f77b778f-dzxd5" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.096799 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-lqq4w"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.096880 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-4cnvp\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.096916 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-4cnvp\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.096935 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7-service-ca\") pod \"console-f9d7485db-vjcp4\" (UID: \"ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7\") " pod="openshift-console/console-f9d7485db-vjcp4" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.096957 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c6gvr\" (UniqueName: \"kubernetes.io/projected/71af7827-a6d5-4a87-9839-996ab528213d-kube-api-access-c6gvr\") pod \"downloads-7954f5f757-r6pqv\" (UID: \"71af7827-a6d5-4a87-9839-996ab528213d\") " pod="openshift-console/downloads-7954f5f757-r6pqv" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.096972 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/032450eb-6d0d-4cc2-90ee-c1ae7228f735-config\") pod \"etcd-operator-b45778765-9hj2c\" (UID: \"032450eb-6d0d-4cc2-90ee-c1ae7228f735\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9hj2c" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.096997 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/a1ba5a71-7a04-4446-8459-1748799af4db-encryption-config\") pod \"apiserver-7bbb656c7d-hw769\" (UID: \"a1ba5a71-7a04-4446-8459-1748799af4db\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-hw769" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.097014 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-4cnvp\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.097031 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-4cnvp\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.097048 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7-console-serving-cert\") pod \"console-f9d7485db-vjcp4\" (UID: \"ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7\") " pod="openshift-console/console-f9d7485db-vjcp4" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.097063 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/e435d9c9-4a33-4c0d-bb2b-84aa5e988124-available-featuregates\") pod \"openshift-config-operator-7777fb866f-cm245\" (UID: \"e435d9c9-4a33-4c0d-bb2b-84aa5e988124\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-cm245" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.097080 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/feedb992-610e-4ceb-84f1-7d5a005d7826-trusted-ca-bundle\") pod \"apiserver-76f77b778f-dzxd5\" (UID: \"feedb992-610e-4ceb-84f1-7d5a005d7826\") " pod="openshift-apiserver/apiserver-76f77b778f-dzxd5" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.097094 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5b90e90f-8fb3-440c-b0d3-20f9582764ea-config\") pod \"machine-approver-56656f9798-x2ljx\" (UID: \"5b90e90f-8fb3-440c-b0d3-20f9582764ea\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-x2ljx" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.097156 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hppqz\" (UniqueName: \"kubernetes.io/projected/50389666-bf4f-4442-a4cd-f3609994ce1b-kube-api-access-hppqz\") pod \"machine-api-operator-5694c8668f-xlxdx\" (UID: \"50389666-bf4f-4442-a4cd-f3609994ce1b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-xlxdx" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.097190 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a2328b20-92ab-42c6-a089-89ec4e1c4ffb-config\") pod \"route-controller-manager-6576b87f9c-zbtp8\" (UID: \"a2328b20-92ab-42c6-a089-89ec4e1c4ffb\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zbtp8" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.097207 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f2pbd\" (UniqueName: \"kubernetes.io/projected/a2328b20-92ab-42c6-a089-89ec4e1c4ffb-kube-api-access-f2pbd\") pod \"route-controller-manager-6576b87f9c-zbtp8\" (UID: \"a2328b20-92ab-42c6-a089-89ec4e1c4ffb\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zbtp8" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.097229 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-lqq4w" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.097239 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a1ba5a71-7a04-4446-8459-1748799af4db-serving-cert\") pod \"apiserver-7bbb656c7d-hw769\" (UID: \"a1ba5a71-7a04-4446-8459-1748799af4db\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-hw769" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.097257 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-4cnvp\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.097274 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lp9dj\" (UniqueName: \"kubernetes.io/projected/ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7-kube-api-access-lp9dj\") pod \"console-f9d7485db-vjcp4\" (UID: \"ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7\") " pod="openshift-console/console-f9d7485db-vjcp4" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.097292 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/feedb992-610e-4ceb-84f1-7d5a005d7826-config\") pod \"apiserver-76f77b778f-dzxd5\" (UID: \"feedb992-610e-4ceb-84f1-7d5a005d7826\") " pod="openshift-apiserver/apiserver-76f77b778f-dzxd5" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.097307 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nxc5c\" (UniqueName: \"kubernetes.io/projected/feedb992-610e-4ceb-84f1-7d5a005d7826-kube-api-access-nxc5c\") pod \"apiserver-76f77b778f-dzxd5\" (UID: \"feedb992-610e-4ceb-84f1-7d5a005d7826\") " pod="openshift-apiserver/apiserver-76f77b778f-dzxd5" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.097328 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a1ba5a71-7a04-4446-8459-1748799af4db-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-hw769\" (UID: \"a1ba5a71-7a04-4446-8459-1748799af4db\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-hw769" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.097346 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9jtxl\" (UniqueName: \"kubernetes.io/projected/a1ba5a71-7a04-4446-8459-1748799af4db-kube-api-access-9jtxl\") pod \"apiserver-7bbb656c7d-hw769\" (UID: \"a1ba5a71-7a04-4446-8459-1748799af4db\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-hw769" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.097362 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-4cnvp\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.097379 4783 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s24mk\" (UniqueName: \"kubernetes.io/projected/032450eb-6d0d-4cc2-90ee-c1ae7228f735-kube-api-access-s24mk\") pod \"etcd-operator-b45778765-9hj2c\" (UID: \"032450eb-6d0d-4cc2-90ee-c1ae7228f735\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9hj2c" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.097398 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a1ba5a71-7a04-4446-8459-1748799af4db-audit-policies\") pod \"apiserver-7bbb656c7d-hw769\" (UID: \"a1ba5a71-7a04-4446-8459-1748799af4db\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-hw769" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.097428 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/032450eb-6d0d-4cc2-90ee-c1ae7228f735-etcd-service-ca\") pod \"etcd-operator-b45778765-9hj2c\" (UID: \"032450eb-6d0d-4cc2-90ee-c1ae7228f735\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9hj2c" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.097450 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/20842d7f-97fb-42e7-80ff-f51a26a55970-config\") pod \"openshift-apiserver-operator-796bbdcf4f-9d7f5\" (UID: \"20842d7f-97fb-42e7-80ff-f51a26a55970\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-9d7f5" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.097465 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/01de2d55-d330-461b-b801-0bfe3078baab-audit-dir\") pod \"oauth-openshift-558db77b4-4cnvp\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.097484 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-4cnvp\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.099750 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-4cnvp\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.100397 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/50389666-bf4f-4442-a4cd-f3609994ce1b-images\") pod \"machine-api-operator-5694c8668f-xlxdx\" (UID: \"50389666-bf4f-4442-a4cd-f3609994ce1b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-xlxdx" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.101280 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a2328b20-92ab-42c6-a089-89ec4e1c4ffb-client-ca\") pod 
\"route-controller-manager-6576b87f9c-zbtp8\" (UID: \"a2328b20-92ab-42c6-a089-89ec4e1c4ffb\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zbtp8" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.102101 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.103606 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/01de2d55-d330-461b-b801-0bfe3078baab-audit-policies\") pod \"oauth-openshift-558db77b4-4cnvp\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.103739 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a1ba5a71-7a04-4446-8459-1748799af4db-audit-dir\") pod \"apiserver-7bbb656c7d-hw769\" (UID: \"a1ba5a71-7a04-4446-8459-1748799af4db\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-hw769" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.104257 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/50389666-bf4f-4442-a4cd-f3609994ce1b-config\") pod \"machine-api-operator-5694c8668f-xlxdx\" (UID: \"50389666-bf4f-4442-a4cd-f3609994ce1b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-xlxdx" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.105677 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a2328b20-92ab-42c6-a089-89ec4e1c4ffb-config\") pod \"route-controller-manager-6576b87f9c-zbtp8\" (UID: \"a2328b20-92ab-42c6-a089-89ec4e1c4ffb\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zbtp8" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.106757 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-9d7f5"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.106807 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-9m8m9"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.107263 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-rq6qg"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.107855 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-rq6qg" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.108364 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-9m8m9" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.111198 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-4cnvp\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.111746 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a1ba5a71-7a04-4446-8459-1748799af4db-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-hw769\" (UID: \"a1ba5a71-7a04-4446-8459-1748799af4db\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-hw769" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.112476 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-4cnvp\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.112949 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/20842d7f-97fb-42e7-80ff-f51a26a55970-config\") pod \"openshift-apiserver-operator-796bbdcf4f-9d7f5\" (UID: \"20842d7f-97fb-42e7-80ff-f51a26a55970\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-9d7f5" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.112992 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/01de2d55-d330-461b-b801-0bfe3078baab-audit-dir\") pod \"oauth-openshift-558db77b4-4cnvp\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.115119 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/a1ba5a71-7a04-4446-8459-1748799af4db-audit-policies\") pod \"apiserver-7bbb656c7d-hw769\" (UID: \"a1ba5a71-7a04-4446-8459-1748799af4db\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-hw769" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.096233 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7qsc9" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.115643 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-lmvhj"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.115671 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-ql8ns"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.115683 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-dzxd5"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.117089 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/a1ba5a71-7a04-4446-8459-1748799af4db-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-hw769\" (UID: \"a1ba5a71-7a04-4446-8459-1748799af4db\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-hw769" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.117984 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-shp99"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.118466 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/a1ba5a71-7a04-4446-8459-1748799af4db-etcd-client\") pod \"apiserver-7bbb656c7d-hw769\" (UID: \"a1ba5a71-7a04-4446-8459-1748799af4db\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-hw769" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.119503 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-xlxdx"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.120534 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.120684 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-r6pqv"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.121887 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-xgcwj"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.123173 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-zbtp8"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.125825 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-6rrvt"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.127368 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-hw769"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.129185 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-4cnvp\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.129236 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8wv52"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.133483 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-drj5j"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.140038 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-4cnvp\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.140693 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.140786 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/20842d7f-97fb-42e7-80ff-f51a26a55970-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-9d7f5\" (UID: \"20842d7f-97fb-42e7-80ff-f51a26a55970\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-9d7f5" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.141096 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-4cnvp\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.141123 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mktxk"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.141102 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/a1ba5a71-7a04-4446-8459-1748799af4db-encryption-config\") pod \"apiserver-7bbb656c7d-hw769\" (UID: \"a1ba5a71-7a04-4446-8459-1748799af4db\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-hw769" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.141174 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-4cnvp\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.141232 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/50389666-bf4f-4442-a4cd-f3609994ce1b-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-xlxdx\" (UID: \"50389666-bf4f-4442-a4cd-f3609994ce1b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-xlxdx" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.141858 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a1ba5a71-7a04-4446-8459-1748799af4db-serving-cert\") pod \"apiserver-7bbb656c7d-hw769\" (UID: 
\"a1ba5a71-7a04-4446-8459-1748799af4db\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-hw769" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.141922 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-4cnvp\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.143573 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-4cnvp\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.143867 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-4cnvp\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.144065 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a2328b20-92ab-42c6-a089-89ec4e1c4ffb-serving-cert\") pod \"route-controller-manager-6576b87f9c-zbtp8\" (UID: \"a2328b20-92ab-42c6-a089-89ec4e1c4ffb\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zbtp8" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.146572 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncpvc"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.150171 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-4cnvp\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.157343 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-x9scv"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.159039 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-xknld"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.163517 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-4cnvp"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.163562 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-d2npj"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.164960 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-9hxcf"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.165003 4783 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-ingress-operator"/"trusted-ca" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.165614 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-s7hdh"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.166174 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-s7hdh" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.167697 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-cm245"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.168848 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-8544f"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.169678 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-t4hbs"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.170812 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-lqq4w"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.172291 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-9ppnb"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.173291 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-9ppnb" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.173544 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-js7h8"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.175959 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-r7v8v"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.178976 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-9hj2c"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.180126 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.180147 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-vjcp4"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.182670 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-q7g9p"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.184368 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-s7hdh"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.186597 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-8mjk7"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.188181 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-9ppnb"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.189861 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rfpcx"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.191771 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-multus/multus-admission-controller-857f4d67dd-x4fgp"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.192580 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7qsc9"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.193870 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-2w77p"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.195075 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29323365-465ld"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.196511 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-8vjpx"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.197249 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-8vjpx" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.197517 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-9m8m9"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.197931 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cfdcm\" (UniqueName: \"kubernetes.io/projected/0ec0d7d6-9f6a-43cb-984b-4e162c07da70-kube-api-access-cfdcm\") pod \"dns-operator-744455d44c-9hxcf\" (UID: \"0ec0d7d6-9f6a-43cb-984b-4e162c07da70\") " pod="openshift-dns-operator/dns-operator-744455d44c-9hxcf" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.198208 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6670b373-8502-4db3-9f27-461eca66f043-images\") pod \"machine-config-operator-74547568cd-d2npj\" (UID: \"6670b373-8502-4db3-9f27-461eca66f043\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-d2npj" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.198239 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/12e7e4aa-75cb-41c1-8d03-8eea90096e8c-config\") pod \"console-operator-58897d9998-x9scv\" (UID: \"12e7e4aa-75cb-41c1-8d03-8eea90096e8c\") " pod="openshift-console-operator/console-operator-58897d9998-x9scv" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.198257 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e435d9c9-4a33-4c0d-bb2b-84aa5e988124-serving-cert\") pod \"openshift-config-operator-7777fb866f-cm245\" (UID: \"e435d9c9-4a33-4c0d-bb2b-84aa5e988124\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-cm245" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.198274 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/feedb992-610e-4ceb-84f1-7d5a005d7826-etcd-serving-ca\") pod \"apiserver-76f77b778f-dzxd5\" (UID: \"feedb992-610e-4ceb-84f1-7d5a005d7826\") " pod="openshift-apiserver/apiserver-76f77b778f-dzxd5" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.198292 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7-service-ca\") pod \"console-f9d7485db-vjcp4\" (UID: 
\"ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7\") " pod="openshift-console/console-f9d7485db-vjcp4" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.198312 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c6gvr\" (UniqueName: \"kubernetes.io/projected/71af7827-a6d5-4a87-9839-996ab528213d-kube-api-access-c6gvr\") pod \"downloads-7954f5f757-r6pqv\" (UID: \"71af7827-a6d5-4a87-9839-996ab528213d\") " pod="openshift-console/downloads-7954f5f757-r6pqv" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.198331 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/032450eb-6d0d-4cc2-90ee-c1ae7228f735-config\") pod \"etcd-operator-b45778765-9hj2c\" (UID: \"032450eb-6d0d-4cc2-90ee-c1ae7228f735\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9hj2c" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.198352 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7-console-serving-cert\") pod \"console-f9d7485db-vjcp4\" (UID: \"ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7\") " pod="openshift-console/console-f9d7485db-vjcp4" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.198375 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/e435d9c9-4a33-4c0d-bb2b-84aa5e988124-available-featuregates\") pod \"openshift-config-operator-7777fb866f-cm245\" (UID: \"e435d9c9-4a33-4c0d-bb2b-84aa5e988124\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-cm245" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.198393 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/feedb992-610e-4ceb-84f1-7d5a005d7826-trusted-ca-bundle\") pod \"apiserver-76f77b778f-dzxd5\" (UID: \"feedb992-610e-4ceb-84f1-7d5a005d7826\") " pod="openshift-apiserver/apiserver-76f77b778f-dzxd5" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.198429 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5b90e90f-8fb3-440c-b0d3-20f9582764ea-config\") pod \"machine-approver-56656f9798-x2ljx\" (UID: \"5b90e90f-8fb3-440c-b0d3-20f9582764ea\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-x2ljx" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.198473 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lp9dj\" (UniqueName: \"kubernetes.io/projected/ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7-kube-api-access-lp9dj\") pod \"console-f9d7485db-vjcp4\" (UID: \"ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7\") " pod="openshift-console/console-f9d7485db-vjcp4" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.198489 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/feedb992-610e-4ceb-84f1-7d5a005d7826-config\") pod \"apiserver-76f77b778f-dzxd5\" (UID: \"feedb992-610e-4ceb-84f1-7d5a005d7826\") " pod="openshift-apiserver/apiserver-76f77b778f-dzxd5" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.198505 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nxc5c\" (UniqueName: 
\"kubernetes.io/projected/feedb992-610e-4ceb-84f1-7d5a005d7826-kube-api-access-nxc5c\") pod \"apiserver-76f77b778f-dzxd5\" (UID: \"feedb992-610e-4ceb-84f1-7d5a005d7826\") " pod="openshift-apiserver/apiserver-76f77b778f-dzxd5" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.198535 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s24mk\" (UniqueName: \"kubernetes.io/projected/032450eb-6d0d-4cc2-90ee-c1ae7228f735-kube-api-access-s24mk\") pod \"etcd-operator-b45778765-9hj2c\" (UID: \"032450eb-6d0d-4cc2-90ee-c1ae7228f735\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9hj2c" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.198580 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/032450eb-6d0d-4cc2-90ee-c1ae7228f735-etcd-service-ca\") pod \"etcd-operator-b45778765-9hj2c\" (UID: \"032450eb-6d0d-4cc2-90ee-c1ae7228f735\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9hj2c" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.198600 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/feedb992-610e-4ceb-84f1-7d5a005d7826-serving-cert\") pod \"apiserver-76f77b778f-dzxd5\" (UID: \"feedb992-610e-4ceb-84f1-7d5a005d7826\") " pod="openshift-apiserver/apiserver-76f77b778f-dzxd5" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.198614 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/032450eb-6d0d-4cc2-90ee-c1ae7228f735-etcd-client\") pod \"etcd-operator-b45778765-9hj2c\" (UID: \"032450eb-6d0d-4cc2-90ee-c1ae7228f735\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9hj2c" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.198631 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/032450eb-6d0d-4cc2-90ee-c1ae7228f735-serving-cert\") pod \"etcd-operator-b45778765-9hj2c\" (UID: \"032450eb-6d0d-4cc2-90ee-c1ae7228f735\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9hj2c" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.198646 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7-trusted-ca-bundle\") pod \"console-f9d7485db-vjcp4\" (UID: \"ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7\") " pod="openshift-console/console-f9d7485db-vjcp4" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.198661 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/0ec0d7d6-9f6a-43cb-984b-4e162c07da70-metrics-tls\") pod \"dns-operator-744455d44c-9hxcf\" (UID: \"0ec0d7d6-9f6a-43cb-984b-4e162c07da70\") " pod="openshift-dns-operator/dns-operator-744455d44c-9hxcf" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.198675 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/12e7e4aa-75cb-41c1-8d03-8eea90096e8c-serving-cert\") pod \"console-operator-58897d9998-x9scv\" (UID: \"12e7e4aa-75cb-41c1-8d03-8eea90096e8c\") " pod="openshift-console-operator/console-operator-58897d9998-x9scv" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.198690 4783 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/feedb992-610e-4ceb-84f1-7d5a005d7826-audit-dir\") pod \"apiserver-76f77b778f-dzxd5\" (UID: \"feedb992-610e-4ceb-84f1-7d5a005d7826\") " pod="openshift-apiserver/apiserver-76f77b778f-dzxd5" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.198705 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/5b90e90f-8fb3-440c-b0d3-20f9582764ea-machine-approver-tls\") pod \"machine-approver-56656f9798-x2ljx\" (UID: \"5b90e90f-8fb3-440c-b0d3-20f9582764ea\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-x2ljx" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.198721 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/12e7e4aa-75cb-41c1-8d03-8eea90096e8c-trusted-ca\") pod \"console-operator-58897d9998-x9scv\" (UID: \"12e7e4aa-75cb-41c1-8d03-8eea90096e8c\") " pod="openshift-console-operator/console-operator-58897d9998-x9scv" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.198736 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/6670b373-8502-4db3-9f27-461eca66f043-proxy-tls\") pod \"machine-config-operator-74547568cd-d2npj\" (UID: \"6670b373-8502-4db3-9f27-461eca66f043\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-d2npj" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.198763 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/feedb992-610e-4ceb-84f1-7d5a005d7826-encryption-config\") pod \"apiserver-76f77b778f-dzxd5\" (UID: \"feedb992-610e-4ceb-84f1-7d5a005d7826\") " pod="openshift-apiserver/apiserver-76f77b778f-dzxd5" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.198784 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7-console-oauth-config\") pod \"console-f9d7485db-vjcp4\" (UID: \"ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7\") " pod="openshift-console/console-f9d7485db-vjcp4" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.198799 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7-oauth-serving-cert\") pod \"console-f9d7485db-vjcp4\" (UID: \"ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7\") " pod="openshift-console/console-f9d7485db-vjcp4" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.198814 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/6670b373-8502-4db3-9f27-461eca66f043-auth-proxy-config\") pod \"machine-config-operator-74547568cd-d2npj\" (UID: \"6670b373-8502-4db3-9f27-461eca66f043\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-d2npj" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.198829 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/feedb992-610e-4ceb-84f1-7d5a005d7826-etcd-client\") pod \"apiserver-76f77b778f-dzxd5\" (UID: \"feedb992-610e-4ceb-84f1-7d5a005d7826\") " 
pod="openshift-apiserver/apiserver-76f77b778f-dzxd5" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.198844 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/feedb992-610e-4ceb-84f1-7d5a005d7826-image-import-ca\") pod \"apiserver-76f77b778f-dzxd5\" (UID: \"feedb992-610e-4ceb-84f1-7d5a005d7826\") " pod="openshift-apiserver/apiserver-76f77b778f-dzxd5" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.198861 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c84k4\" (UniqueName: \"kubernetes.io/projected/6670b373-8502-4db3-9f27-461eca66f043-kube-api-access-c84k4\") pod \"machine-config-operator-74547568cd-d2npj\" (UID: \"6670b373-8502-4db3-9f27-461eca66f043\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-d2npj" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.198877 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/032450eb-6d0d-4cc2-90ee-c1ae7228f735-etcd-ca\") pod \"etcd-operator-b45778765-9hj2c\" (UID: \"032450eb-6d0d-4cc2-90ee-c1ae7228f735\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9hj2c" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.198893 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/feedb992-610e-4ceb-84f1-7d5a005d7826-node-pullsecrets\") pod \"apiserver-76f77b778f-dzxd5\" (UID: \"feedb992-610e-4ceb-84f1-7d5a005d7826\") " pod="openshift-apiserver/apiserver-76f77b778f-dzxd5" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.198913 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/feedb992-610e-4ceb-84f1-7d5a005d7826-audit\") pod \"apiserver-76f77b778f-dzxd5\" (UID: \"feedb992-610e-4ceb-84f1-7d5a005d7826\") " pod="openshift-apiserver/apiserver-76f77b778f-dzxd5" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.198928 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/5b90e90f-8fb3-440c-b0d3-20f9582764ea-auth-proxy-config\") pod \"machine-approver-56656f9798-x2ljx\" (UID: \"5b90e90f-8fb3-440c-b0d3-20f9582764ea\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-x2ljx" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.198944 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7zfcg\" (UniqueName: \"kubernetes.io/projected/e435d9c9-4a33-4c0d-bb2b-84aa5e988124-kube-api-access-7zfcg\") pod \"openshift-config-operator-7777fb866f-cm245\" (UID: \"e435d9c9-4a33-4c0d-bb2b-84aa5e988124\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-cm245" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.198960 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cc27s\" (UniqueName: \"kubernetes.io/projected/12e7e4aa-75cb-41c1-8d03-8eea90096e8c-kube-api-access-cc27s\") pod \"console-operator-58897d9998-x9scv\" (UID: \"12e7e4aa-75cb-41c1-8d03-8eea90096e8c\") " pod="openshift-console-operator/console-operator-58897d9998-x9scv" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.198978 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: 
\"kubernetes.io/configmap/ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7-console-config\") pod \"console-f9d7485db-vjcp4\" (UID: \"ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7\") " pod="openshift-console/console-f9d7485db-vjcp4" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.198995 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2j856\" (UniqueName: \"kubernetes.io/projected/5b90e90f-8fb3-440c-b0d3-20f9582764ea-kube-api-access-2j856\") pod \"machine-approver-56656f9798-x2ljx\" (UID: \"5b90e90f-8fb3-440c-b0d3-20f9582764ea\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-x2ljx" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.199341 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-8vjpx"] Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.200014 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/12e7e4aa-75cb-41c1-8d03-8eea90096e8c-config\") pod \"console-operator-58897d9998-x9scv\" (UID: \"12e7e4aa-75cb-41c1-8d03-8eea90096e8c\") " pod="openshift-console-operator/console-operator-58897d9998-x9scv" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.200811 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/feedb992-610e-4ceb-84f1-7d5a005d7826-audit-dir\") pod \"apiserver-76f77b778f-dzxd5\" (UID: \"feedb992-610e-4ceb-84f1-7d5a005d7826\") " pod="openshift-apiserver/apiserver-76f77b778f-dzxd5" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.201620 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.201702 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7-service-ca\") pod \"console-f9d7485db-vjcp4\" (UID: \"ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7\") " pod="openshift-console/console-f9d7485db-vjcp4" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.202006 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/feedb992-610e-4ceb-84f1-7d5a005d7826-config\") pod \"apiserver-76f77b778f-dzxd5\" (UID: \"feedb992-610e-4ceb-84f1-7d5a005d7826\") " pod="openshift-apiserver/apiserver-76f77b778f-dzxd5" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.202021 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7-oauth-serving-cert\") pod \"console-f9d7485db-vjcp4\" (UID: \"ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7\") " pod="openshift-console/console-f9d7485db-vjcp4" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.202124 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/feedb992-610e-4ceb-84f1-7d5a005d7826-etcd-serving-ca\") pod \"apiserver-76f77b778f-dzxd5\" (UID: \"feedb992-610e-4ceb-84f1-7d5a005d7826\") " pod="openshift-apiserver/apiserver-76f77b778f-dzxd5" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.202168 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/feedb992-610e-4ceb-84f1-7d5a005d7826-node-pullsecrets\") pod 
\"apiserver-76f77b778f-dzxd5\" (UID: \"feedb992-610e-4ceb-84f1-7d5a005d7826\") " pod="openshift-apiserver/apiserver-76f77b778f-dzxd5" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.202985 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7-console-config\") pod \"console-f9d7485db-vjcp4\" (UID: \"ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7\") " pod="openshift-console/console-f9d7485db-vjcp4" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.203005 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/6670b373-8502-4db3-9f27-461eca66f043-auth-proxy-config\") pod \"machine-config-operator-74547568cd-d2npj\" (UID: \"6670b373-8502-4db3-9f27-461eca66f043\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-d2npj" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.203504 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/e435d9c9-4a33-4c0d-bb2b-84aa5e988124-available-featuregates\") pod \"openshift-config-operator-7777fb866f-cm245\" (UID: \"e435d9c9-4a33-4c0d-bb2b-84aa5e988124\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-cm245" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.203912 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/feedb992-610e-4ceb-84f1-7d5a005d7826-audit\") pod \"apiserver-76f77b778f-dzxd5\" (UID: \"feedb992-610e-4ceb-84f1-7d5a005d7826\") " pod="openshift-apiserver/apiserver-76f77b778f-dzxd5" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.204650 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/feedb992-610e-4ceb-84f1-7d5a005d7826-image-import-ca\") pod \"apiserver-76f77b778f-dzxd5\" (UID: \"feedb992-610e-4ceb-84f1-7d5a005d7826\") " pod="openshift-apiserver/apiserver-76f77b778f-dzxd5" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.205035 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/feedb992-610e-4ceb-84f1-7d5a005d7826-trusted-ca-bundle\") pod \"apiserver-76f77b778f-dzxd5\" (UID: \"feedb992-610e-4ceb-84f1-7d5a005d7826\") " pod="openshift-apiserver/apiserver-76f77b778f-dzxd5" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.205470 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5b90e90f-8fb3-440c-b0d3-20f9582764ea-config\") pod \"machine-approver-56656f9798-x2ljx\" (UID: \"5b90e90f-8fb3-440c-b0d3-20f9582764ea\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-x2ljx" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.205635 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7-trusted-ca-bundle\") pod \"console-f9d7485db-vjcp4\" (UID: \"ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7\") " pod="openshift-console/console-f9d7485db-vjcp4" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.206295 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/12e7e4aa-75cb-41c1-8d03-8eea90096e8c-trusted-ca\") pod \"console-operator-58897d9998-x9scv\" (UID: \"12e7e4aa-75cb-41c1-8d03-8eea90096e8c\") " pod="openshift-console-operator/console-operator-58897d9998-x9scv" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.206340 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/5b90e90f-8fb3-440c-b0d3-20f9582764ea-machine-approver-tls\") pod \"machine-approver-56656f9798-x2ljx\" (UID: \"5b90e90f-8fb3-440c-b0d3-20f9582764ea\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-x2ljx" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.207069 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/feedb992-610e-4ceb-84f1-7d5a005d7826-etcd-client\") pod \"apiserver-76f77b778f-dzxd5\" (UID: \"feedb992-610e-4ceb-84f1-7d5a005d7826\") " pod="openshift-apiserver/apiserver-76f77b778f-dzxd5" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.207509 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7-console-oauth-config\") pod \"console-f9d7485db-vjcp4\" (UID: \"ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7\") " pod="openshift-console/console-f9d7485db-vjcp4" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.207613 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/12e7e4aa-75cb-41c1-8d03-8eea90096e8c-serving-cert\") pod \"console-operator-58897d9998-x9scv\" (UID: \"12e7e4aa-75cb-41c1-8d03-8eea90096e8c\") " pod="openshift-console-operator/console-operator-58897d9998-x9scv" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.208476 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/feedb992-610e-4ceb-84f1-7d5a005d7826-encryption-config\") pod \"apiserver-76f77b778f-dzxd5\" (UID: \"feedb992-610e-4ceb-84f1-7d5a005d7826\") " pod="openshift-apiserver/apiserver-76f77b778f-dzxd5" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.208832 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7-console-serving-cert\") pod \"console-f9d7485db-vjcp4\" (UID: \"ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7\") " pod="openshift-console/console-f9d7485db-vjcp4" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.209086 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/feedb992-610e-4ceb-84f1-7d5a005d7826-serving-cert\") pod \"apiserver-76f77b778f-dzxd5\" (UID: \"feedb992-610e-4ceb-84f1-7d5a005d7826\") " pod="openshift-apiserver/apiserver-76f77b778f-dzxd5" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.210104 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e435d9c9-4a33-4c0d-bb2b-84aa5e988124-serving-cert\") pod \"openshift-config-operator-7777fb866f-cm245\" (UID: \"e435d9c9-4a33-4c0d-bb2b-84aa5e988124\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-cm245" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.219765 4783 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.240167 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.249773 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/5b90e90f-8fb3-440c-b0d3-20f9582764ea-auth-proxy-config\") pod \"machine-approver-56656f9798-x2ljx\" (UID: \"5b90e90f-8fb3-440c-b0d3-20f9582764ea\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-x2ljx" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.259866 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.279801 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.299528 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.323077 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.339948 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.359648 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.379398 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.400224 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.419574 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.440483 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.459849 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.479025 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.500287 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.519099 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.539855 4783 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.560038 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.580460 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.587581 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/032450eb-6d0d-4cc2-90ee-c1ae7228f735-serving-cert\") pod \"etcd-operator-b45778765-9hj2c\" (UID: \"032450eb-6d0d-4cc2-90ee-c1ae7228f735\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9hj2c" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.600073 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.608176 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/0ec0d7d6-9f6a-43cb-984b-4e162c07da70-metrics-tls\") pod \"dns-operator-744455d44c-9hxcf\" (UID: \"0ec0d7d6-9f6a-43cb-984b-4e162c07da70\") " pod="openshift-dns-operator/dns-operator-744455d44c-9hxcf" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.620562 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.639608 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.643388 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/032450eb-6d0d-4cc2-90ee-c1ae7228f735-config\") pod \"etcd-operator-b45778765-9hj2c\" (UID: \"032450eb-6d0d-4cc2-90ee-c1ae7228f735\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9hj2c" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.660083 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.680934 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.699981 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.709139 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/6670b373-8502-4db3-9f27-461eca66f043-proxy-tls\") pod \"machine-config-operator-74547568cd-d2npj\" (UID: \"6670b373-8502-4db3-9f27-461eca66f043\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-d2npj" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.721018 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.740527 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.760141 
4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.763943 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/032450eb-6d0d-4cc2-90ee-c1ae7228f735-etcd-service-ca\") pod \"etcd-operator-b45778765-9hj2c\" (UID: \"032450eb-6d0d-4cc2-90ee-c1ae7228f735\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9hj2c" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.780271 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.788359 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/032450eb-6d0d-4cc2-90ee-c1ae7228f735-etcd-client\") pod \"etcd-operator-b45778765-9hj2c\" (UID: \"032450eb-6d0d-4cc2-90ee-c1ae7228f735\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9hj2c" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.820118 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.838562 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/032450eb-6d0d-4cc2-90ee-c1ae7228f735-etcd-ca\") pod \"etcd-operator-b45778765-9hj2c\" (UID: \"032450eb-6d0d-4cc2-90ee-c1ae7228f735\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9hj2c" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.838693 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6670b373-8502-4db3-9f27-461eca66f043-images\") pod \"machine-config-operator-74547568cd-d2npj\" (UID: \"6670b373-8502-4db3-9f27-461eca66f043\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-d2npj" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.840041 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.860039 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.880268 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.900448 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.921134 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.940712 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.959242 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Oct 02 10:55:08 crc kubenswrapper[4783]: I1002 10:55:08.980159 4783 reflector.go:368] Caches populated for 
*v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.000507 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.020952 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.040029 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.060168 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.078477 4783 request.go:700] Waited for 1.00478984s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler-operator/configmaps?fieldSelector=metadata.name%3Dkube-root-ca.crt&limit=500&resourceVersion=0 Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.079811 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.099250 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.140855 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.160101 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.179368 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.199338 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.219972 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.240183 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.261552 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.280535 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.301817 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.320985 4783 reflector.go:368] 
Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.340615 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.359950 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.380616 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.400214 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.426858 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.440201 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.460672 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.480734 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.499664 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.520623 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.539869 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.560815 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.607555 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fm8hm\" (UniqueName: \"kubernetes.io/projected/20842d7f-97fb-42e7-80ff-f51a26a55970-kube-api-access-fm8hm\") pod \"openshift-apiserver-operator-796bbdcf4f-9d7f5\" (UID: \"20842d7f-97fb-42e7-80ff-f51a26a55970\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-9d7f5" Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.637960 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hppqz\" (UniqueName: \"kubernetes.io/projected/50389666-bf4f-4442-a4cd-f3609994ce1b-kube-api-access-hppqz\") pod \"machine-api-operator-5694c8668f-xlxdx\" (UID: \"50389666-bf4f-4442-a4cd-f3609994ce1b\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-xlxdx" Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.638519 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vgspg\" (UniqueName: \"kubernetes.io/projected/01de2d55-d330-461b-b801-0bfe3078baab-kube-api-access-vgspg\") pod 
\"oauth-openshift-558db77b4-4cnvp\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.639758 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.660228 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.682018 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.699914 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.705164 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-9d7f5" Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.759984 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9jtxl\" (UniqueName: \"kubernetes.io/projected/a1ba5a71-7a04-4446-8459-1748799af4db-kube-api-access-9jtxl\") pod \"apiserver-7bbb656c7d-hw769\" (UID: \"a1ba5a71-7a04-4446-8459-1748799af4db\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-hw769" Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.760544 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.767210 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f2pbd\" (UniqueName: \"kubernetes.io/projected/a2328b20-92ab-42c6-a089-89ec4e1c4ffb-kube-api-access-f2pbd\") pod \"route-controller-manager-6576b87f9c-zbtp8\" (UID: \"a2328b20-92ab-42c6-a089-89ec4e1c4ffb\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zbtp8" Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.775894 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-xlxdx" Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.782640 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.789573 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.799800 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.820349 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.839581 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.860602 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.880563 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.900473 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.921204 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.941823 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.961001 4783 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Oct 02 10:55:09 crc kubenswrapper[4783]: I1002 10:55:09.980733 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.000090 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.020074 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.022980 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zbtp8" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.030870 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-4cnvp"] Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.034027 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-hw769" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.040143 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Oct 02 10:55:10 crc kubenswrapper[4783]: W1002 10:55:10.047493 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod01de2d55_d330_461b_b801_0bfe3078baab.slice/crio-6c94bf68991a7790ae11eed88af4adf040fb421890d8e26bfc5abe2ce86791f9 WatchSource:0}: Error finding container 6c94bf68991a7790ae11eed88af4adf040fb421890d8e26bfc5abe2ce86791f9: Status 404 returned error can't find the container with id 6c94bf68991a7790ae11eed88af4adf040fb421890d8e26bfc5abe2ce86791f9 Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.074508 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2j856\" (UniqueName: \"kubernetes.io/projected/5b90e90f-8fb3-440c-b0d3-20f9582764ea-kube-api-access-2j856\") pod \"machine-approver-56656f9798-x2ljx\" (UID: \"5b90e90f-8fb3-440c-b0d3-20f9582764ea\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-x2ljx" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.094736 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cfdcm\" (UniqueName: \"kubernetes.io/projected/0ec0d7d6-9f6a-43cb-984b-4e162c07da70-kube-api-access-cfdcm\") pod \"dns-operator-744455d44c-9hxcf\" (UID: \"0ec0d7d6-9f6a-43cb-984b-4e162c07da70\") " pod="openshift-dns-operator/dns-operator-744455d44c-9hxcf" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.098505 4783 request.go:700] Waited for 1.898229613s due to client-side throttling, not priority and fairness, request: POST:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-console/serviceaccounts/console/token Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.118264 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lp9dj\" (UniqueName: \"kubernetes.io/projected/ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7-kube-api-access-lp9dj\") pod \"console-f9d7485db-vjcp4\" (UID: \"ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7\") " pod="openshift-console/console-f9d7485db-vjcp4" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.127137 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-x2ljx" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.133629 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c6gvr\" (UniqueName: \"kubernetes.io/projected/71af7827-a6d5-4a87-9839-996ab528213d-kube-api-access-c6gvr\") pod \"downloads-7954f5f757-r6pqv\" (UID: \"71af7827-a6d5-4a87-9839-996ab528213d\") " pod="openshift-console/downloads-7954f5f757-r6pqv" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.157585 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cc27s\" (UniqueName: \"kubernetes.io/projected/12e7e4aa-75cb-41c1-8d03-8eea90096e8c-kube-api-access-cc27s\") pod \"console-operator-58897d9998-x9scv\" (UID: \"12e7e4aa-75cb-41c1-8d03-8eea90096e8c\") " pod="openshift-console-operator/console-operator-58897d9998-x9scv" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.174068 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c84k4\" (UniqueName: \"kubernetes.io/projected/6670b373-8502-4db3-9f27-461eca66f043-kube-api-access-c84k4\") pod \"machine-config-operator-74547568cd-d2npj\" (UID: \"6670b373-8502-4db3-9f27-461eca66f043\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-d2npj" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.193821 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7zfcg\" (UniqueName: \"kubernetes.io/projected/e435d9c9-4a33-4c0d-bb2b-84aa5e988124-kube-api-access-7zfcg\") pod \"openshift-config-operator-7777fb866f-cm245\" (UID: \"e435d9c9-4a33-4c0d-bb2b-84aa5e988124\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-cm245" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.204801 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-vjcp4" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.219254 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nxc5c\" (UniqueName: \"kubernetes.io/projected/feedb992-610e-4ceb-84f1-7d5a005d7826-kube-api-access-nxc5c\") pod \"apiserver-76f77b778f-dzxd5\" (UID: \"feedb992-610e-4ceb-84f1-7d5a005d7826\") " pod="openshift-apiserver/apiserver-76f77b778f-dzxd5" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.230467 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-xlxdx"] Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.230951 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-9d7f5"] Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.233879 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s24mk\" (UniqueName: \"kubernetes.io/projected/032450eb-6d0d-4cc2-90ee-c1ae7228f735-kube-api-access-s24mk\") pod \"etcd-operator-b45778765-9hj2c\" (UID: \"032450eb-6d0d-4cc2-90ee-c1ae7228f735\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9hj2c" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.258277 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-hw769"] Oct 02 10:55:10 crc kubenswrapper[4783]: W1002 10:55:10.267440 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda1ba5a71_7a04_4446_8459_1748799af4db.slice/crio-9f971d13fc120f11dab1362ddff0b2a19784e28f53ee65e43449c938c74ddad4 WatchSource:0}: Error finding container 9f971d13fc120f11dab1362ddff0b2a19784e28f53ee65e43449c938c74ddad4: Status 404 returned error can't find the container with id 9f971d13fc120f11dab1362ddff0b2a19784e28f53ee65e43449c938c74ddad4 Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.282155 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-r6pqv" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.292591 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-cm245" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.308193 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-9d7f5" event={"ID":"20842d7f-97fb-42e7-80ff-f51a26a55970","Type":"ContainerStarted","Data":"d793d3383ef62e128066eed958c1f71d3d35581370473834d42a33be282d78eb"} Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.310060 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-hw769" event={"ID":"a1ba5a71-7a04-4446-8459-1748799af4db","Type":"ContainerStarted","Data":"9f971d13fc120f11dab1362ddff0b2a19784e28f53ee65e43449c938c74ddad4"} Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.311077 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-xlxdx" event={"ID":"50389666-bf4f-4442-a4cd-f3609994ce1b","Type":"ContainerStarted","Data":"996c79ad6380fe4e66725a0ed56ed9cb527c773ea14d9b14c290255283dcca60"} Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.311835 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-x2ljx" event={"ID":"5b90e90f-8fb3-440c-b0d3-20f9582764ea","Type":"ContainerStarted","Data":"b1646b8532af9e9754c1711fe7df9f6aab3b3660444ba2691e186970c9e46b7f"} Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.312390 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" event={"ID":"01de2d55-d330-461b-b801-0bfe3078baab","Type":"ContainerStarted","Data":"6c94bf68991a7790ae11eed88af4adf040fb421890d8e26bfc5abe2ce86791f9"} Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.330953 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe-trusted-ca\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.330982 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/baa575d2-ecb0-4e64-b0fe-361fec92e555-config\") pod \"controller-manager-879f6c89f-r7v8v\" (UID: \"baa575d2-ecb0-4e64-b0fe-361fec92e555\") " pod="openshift-controller-manager/controller-manager-879f6c89f-r7v8v" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.330999 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/baa575d2-ecb0-4e64-b0fe-361fec92e555-serving-cert\") pod \"controller-manager-879f6c89f-r7v8v\" (UID: \"baa575d2-ecb0-4e64-b0fe-361fec92e555\") " pod="openshift-controller-manager/controller-manager-879f6c89f-r7v8v" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.331017 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b741b1e1-2c96-4a89-82eb-0a4106dfc6bb-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-8wv52\" (UID: \"b741b1e1-2c96-4a89-82eb-0a4106dfc6bb\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8wv52" 
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.331056 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/73cf839f-ec02-4db7-b640-e1c0783fafc9-config\") pod \"kube-apiserver-operator-766d6c64bb-mktxk\" (UID: \"73cf839f-ec02-4db7-b640-e1c0783fafc9\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mktxk" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.331071 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/baa575d2-ecb0-4e64-b0fe-361fec92e555-client-ca\") pod \"controller-manager-879f6c89f-r7v8v\" (UID: \"baa575d2-ecb0-4e64-b0fe-361fec92e555\") " pod="openshift-controller-manager/controller-manager-879f6c89f-r7v8v" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.331087 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b741b1e1-2c96-4a89-82eb-0a4106dfc6bb-config\") pod \"kube-controller-manager-operator-78b949d7b-8wv52\" (UID: \"b741b1e1-2c96-4a89-82eb-0a4106dfc6bb\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8wv52" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.331119 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zt5xd\" (UniqueName: \"kubernetes.io/projected/0c6fa8cc-e2f2-46a1-995d-96a9b76f01fb-kube-api-access-zt5xd\") pod \"authentication-operator-69f744f599-ql8ns\" (UID: \"0c6fa8cc-e2f2-46a1-995d-96a9b76f01fb\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ql8ns" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.331134 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe-bound-sa-token\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.331148 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0c6fa8cc-e2f2-46a1-995d-96a9b76f01fb-serving-cert\") pod \"authentication-operator-69f744f599-ql8ns\" (UID: \"0c6fa8cc-e2f2-46a1-995d-96a9b76f01fb\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ql8ns" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.331165 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0c6fa8cc-e2f2-46a1-995d-96a9b76f01fb-service-ca-bundle\") pod \"authentication-operator-69f744f599-ql8ns\" (UID: \"0c6fa8cc-e2f2-46a1-995d-96a9b76f01fb\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ql8ns" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.331192 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/eb03ecb8-df6d-404e-b248-90ca5372ecf9-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-xknld\" (UID: \"eb03ecb8-df6d-404e-b248-90ca5372ecf9\") " 
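The many kube-api-access-* volumes in the MountVolume.SetUp and VerifyControllerAttachedVolume entries are projected volumes that kubelet assembles from a bound service-account token, the kube-root-ca.crt ConfigMap, and the pod's namespace via the downward API. A sketch of that volume shape using the core/v1 types; the volume name and expiry value are illustrative:

// projected_volume_sketch.go: the shape of a kube-api-access-* volume.
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	expiry := int64(3607) // kubelet staggers token lifetimes around 1h
	vol := corev1.Volume{
		Name: "kube-api-access-fm8hm",
		VolumeSource: corev1.VolumeSource{
			Projected: &corev1.ProjectedVolumeSource{
				Sources: []corev1.VolumeProjection{
					// Bound, auto-rotated service-account token.
					{ServiceAccountToken: &corev1.ServiceAccountTokenProjection{
						ExpirationSeconds: &expiry,
						Path:              "token",
					}},
					// Cluster CA bundle for verifying the API server.
					{ConfigMap: &corev1.ConfigMapProjection{
						LocalObjectReference: corev1.LocalObjectReference{Name: "kube-root-ca.crt"},
						Items:                []corev1.KeyToPath{{Key: "ca.crt", Path: "ca.crt"}},
					}},
					// Pod namespace via the downward API.
					{DownwardAPI: &corev1.DownwardAPIProjection{
						Items: []corev1.DownwardAPIVolumeFile{{
							Path:     "namespace",
							FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.namespace"},
						}},
					}},
				},
			},
		},
	}
	fmt.Printf("volume %q with %d projected sources\n",
		vol.Name, len(vol.VolumeSource.Projected.Sources))
}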
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-xknld" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.331227 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/baa575d2-ecb0-4e64-b0fe-361fec92e555-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-r7v8v\" (UID: \"baa575d2-ecb0-4e64-b0fe-361fec92e555\") " pod="openshift-controller-manager/controller-manager-879f6c89f-r7v8v" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.331247 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe-registry-tls\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.331263 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/7567048e-a0b0-46e5-b4bf-51180f84b884-default-certificate\") pod \"router-default-5444994796-zwxct\" (UID: \"7567048e-a0b0-46e5-b4bf-51180f84b884\") " pod="openshift-ingress/router-default-5444994796-zwxct" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.331280 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/73cf839f-ec02-4db7-b640-e1c0783fafc9-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-mktxk\" (UID: \"73cf839f-ec02-4db7-b640-e1c0783fafc9\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mktxk" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.331311 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0c6fa8cc-e2f2-46a1-995d-96a9b76f01fb-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-ql8ns\" (UID: \"0c6fa8cc-e2f2-46a1-995d-96a9b76f01fb\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ql8ns" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.331330 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mqn2g\" (UniqueName: \"kubernetes.io/projected/88a3511a-30a6-4aa3-99d9-e65e05aaefc8-kube-api-access-mqn2g\") pod \"cluster-image-registry-operator-dc59b4c8b-drj5j\" (UID: \"88a3511a-30a6-4aa3-99d9-e65e05aaefc8\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-drj5j" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.331348 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eb03ecb8-df6d-404e-b248-90ca5372ecf9-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-xknld\" (UID: \"eb03ecb8-df6d-404e-b248-90ca5372ecf9\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-xknld" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.331383 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe-registry-certificates\") pod 
\"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.331401 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-llvqw\" (UniqueName: \"kubernetes.io/projected/02091d48-2a3f-4efc-a173-fc17fb2f3f9a-kube-api-access-llvqw\") pod \"multus-admission-controller-857f4d67dd-x4fgp\" (UID: \"02091d48-2a3f-4efc-a173-fc17fb2f3f9a\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-x4fgp" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.331434 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/02091d48-2a3f-4efc-a173-fc17fb2f3f9a-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-x4fgp\" (UID: \"02091d48-2a3f-4efc-a173-fc17fb2f3f9a\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-x4fgp" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.331449 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qtxtk\" (UniqueName: \"kubernetes.io/projected/1b89cc5b-178f-4c08-b588-979dc2393bae-kube-api-access-qtxtk\") pod \"ingress-operator-5b745b69d9-xgcwj\" (UID: \"1b89cc5b-178f-4c08-b588-979dc2393bae\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xgcwj" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.331476 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vs58m\" (UniqueName: \"kubernetes.io/projected/baa575d2-ecb0-4e64-b0fe-361fec92e555-kube-api-access-vs58m\") pod \"controller-manager-879f6c89f-r7v8v\" (UID: \"baa575d2-ecb0-4e64-b0fe-361fec92e555\") " pod="openshift-controller-manager/controller-manager-879f6c89f-r7v8v" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.331505 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/88a3511a-30a6-4aa3-99d9-e65e05aaefc8-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-drj5j\" (UID: \"88a3511a-30a6-4aa3-99d9-e65e05aaefc8\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-drj5j" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.331521 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pr7xh\" (UniqueName: \"kubernetes.io/projected/7567048e-a0b0-46e5-b4bf-51180f84b884-kube-api-access-pr7xh\") pod \"router-default-5444994796-zwxct\" (UID: \"7567048e-a0b0-46e5-b4bf-51180f84b884\") " pod="openshift-ingress/router-default-5444994796-zwxct" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.331536 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b741b1e1-2c96-4a89-82eb-0a4106dfc6bb-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-8wv52\" (UID: \"b741b1e1-2c96-4a89-82eb-0a4106dfc6bb\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8wv52" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.331549 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: 
\"kubernetes.io/projected/88a3511a-30a6-4aa3-99d9-e65e05aaefc8-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-drj5j\" (UID: \"88a3511a-30a6-4aa3-99d9-e65e05aaefc8\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-drj5j" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.331566 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/73cf839f-ec02-4db7-b640-e1c0783fafc9-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-mktxk\" (UID: \"73cf839f-ec02-4db7-b640-e1c0783fafc9\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mktxk" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.331582 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe-installation-pull-secrets\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.331605 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/88a3511a-30a6-4aa3-99d9-e65e05aaefc8-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-drj5j\" (UID: \"88a3511a-30a6-4aa3-99d9-e65e05aaefc8\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-drj5j" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.331621 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7567048e-a0b0-46e5-b4bf-51180f84b884-metrics-certs\") pod \"router-default-5444994796-zwxct\" (UID: \"7567048e-a0b0-46e5-b4bf-51180f84b884\") " pod="openshift-ingress/router-default-5444994796-zwxct" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.331644 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c6fa8cc-e2f2-46a1-995d-96a9b76f01fb-config\") pod \"authentication-operator-69f744f599-ql8ns\" (UID: \"0c6fa8cc-e2f2-46a1-995d-96a9b76f01fb\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ql8ns" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.331658 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1b89cc5b-178f-4c08-b588-979dc2393bae-trusted-ca\") pod \"ingress-operator-5b745b69d9-xgcwj\" (UID: \"1b89cc5b-178f-4c08-b588-979dc2393bae\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xgcwj" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.331674 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/b9581e6c-3a17-4a11-bf2e-c24f90ececca-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-lmvhj\" (UID: \"b9581e6c-3a17-4a11-bf2e-c24f90ececca\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-lmvhj" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.331692 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/7567048e-a0b0-46e5-b4bf-51180f84b884-stats-auth\") pod \"router-default-5444994796-zwxct\" (UID: \"7567048e-a0b0-46e5-b4bf-51180f84b884\") " pod="openshift-ingress/router-default-5444994796-zwxct" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.331707 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/1b89cc5b-178f-4c08-b588-979dc2393bae-metrics-tls\") pod \"ingress-operator-5b745b69d9-xgcwj\" (UID: \"1b89cc5b-178f-4c08-b588-979dc2393bae\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xgcwj" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.331722 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7567048e-a0b0-46e5-b4bf-51180f84b884-service-ca-bundle\") pod \"router-default-5444994796-zwxct\" (UID: \"7567048e-a0b0-46e5-b4bf-51180f84b884\") " pod="openshift-ingress/router-default-5444994796-zwxct" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.331748 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe-ca-trust-extracted\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.331777 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.331794 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n97rr\" (UniqueName: \"kubernetes.io/projected/eb03ecb8-df6d-404e-b248-90ca5372ecf9-kube-api-access-n97rr\") pod \"openshift-controller-manager-operator-756b6f6bc6-xknld\" (UID: \"eb03ecb8-df6d-404e-b248-90ca5372ecf9\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-xknld" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.333097 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dft9f\" (UniqueName: \"kubernetes.io/projected/700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe-kube-api-access-dft9f\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.333121 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/1b89cc5b-178f-4c08-b588-979dc2393bae-bound-sa-token\") pod \"ingress-operator-5b745b69d9-xgcwj\" (UID: \"1b89cc5b-178f-4c08-b588-979dc2393bae\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xgcwj" Oct 02 10:55:10 crc kubenswrapper[4783]: E1002 10:55:10.333143 4783 nestedpendingoperations.go:348] Operation for 
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:10.833127603 +0000 UTC m=+144.149321854 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.333174 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dp448\" (UniqueName: \"kubernetes.io/projected/b9581e6c-3a17-4a11-bf2e-c24f90ececca-kube-api-access-dp448\") pod \"cluster-samples-operator-665b6dd947-lmvhj\" (UID: \"b9581e6c-3a17-4a11-bf2e-c24f90ececca\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-lmvhj" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.373234 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-vjcp4"] Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.376635 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-9hxcf" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.386077 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-9hj2c" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.392943 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-d2npj" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.403748 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-dzxd5" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.410297 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-zbtp8"] Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.433604 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:55:10 crc kubenswrapper[4783]: E1002 10:55:10.433721 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:10.933696518 +0000 UTC m=+144.249890779 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.433810 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s64j4\" (UniqueName: \"kubernetes.io/projected/d507ecac-9761-4886-8acd-daff78a5d360-kube-api-access-s64j4\") pod \"packageserver-d55dfcdfc-7qsc9\" (UID: \"d507ecac-9761-4886-8acd-daff78a5d360\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7qsc9" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.433845 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/f2ba3d51-3014-4233-931b-adaa45c937dd-mountpoint-dir\") pod \"csi-hostpathplugin-9ppnb\" (UID: \"f2ba3d51-3014-4233-931b-adaa45c937dd\") " pod="hostpath-provisioner/csi-hostpathplugin-9ppnb" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.433872 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/1d0ded20-7af7-44b2-9bd8-4a6066faa4d5-profile-collector-cert\") pod \"catalog-operator-68c6474976-ncpvc\" (UID: \"1d0ded20-7af7-44b2-9bd8-4a6066faa4d5\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncpvc" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.433907 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe-registry-tls\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.433926 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/7567048e-a0b0-46e5-b4bf-51180f84b884-default-certificate\") pod \"router-default-5444994796-zwxct\" (UID: \"7567048e-a0b0-46e5-b4bf-51180f84b884\") " pod="openshift-ingress/router-default-5444994796-zwxct" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.433944 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-79s2g\" (UniqueName: \"kubernetes.io/projected/093341b6-a75f-4acc-82fe-cddcc303f189-kube-api-access-79s2g\") pod \"machine-config-server-rq6qg\" (UID: \"093341b6-a75f-4acc-82fe-cddcc303f189\") " pod="openshift-machine-config-operator/machine-config-server-rq6qg" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.433962 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/73cf839f-ec02-4db7-b640-e1c0783fafc9-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-mktxk\" (UID: \"73cf839f-ec02-4db7-b640-e1c0783fafc9\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mktxk" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.433977 4783 reconciler_common.go:245] 
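Both nestedpendingoperations.go errors above fail for the same reason: the kubevirt.io.hostpath-provisioner CSI driver has not yet registered with this kubelet (its csi-hostpathplugin-9ppnb pod is itself still being set up in these lines), so the operation executor blocks retries for 500ms, a delay that doubles on repeated failures. A diagnostic sketch under those assumptions: poll the node's CSINode object with a similar backoff until the driver appears in the registered-driver list. The node name "crc" matches the log's host prefix; the backoff parameters are illustrative:

// csinode_wait_sketch.go: wait for a CSI driver to register with the node.
package main

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)

	// 500ms initial delay, doubling each attempt, mirroring the
	// durationBeforeRetry behaviour logged by nestedpendingoperations.
	backoff := wait.Backoff{Duration: 500 * time.Millisecond, Factor: 2, Steps: 8}
	err = wait.ExponentialBackoff(backoff, func() (bool, error) {
		csiNode, err := cs.StorageV1().CSINodes().Get(context.TODO(), "crc", metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		// CSINode.Spec.Drivers is the "list of registered CSI drivers"
		// the kubelet error message refers to.
		for _, d := range csiNode.Spec.Drivers {
			if d.Name == "kubevirt.io.hostpath-provisioner" {
				return true, nil
			}
		}
		fmt.Println("driver not registered yet; retrying")
		return false, nil
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("driver registered")
}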
"operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/093341b6-a75f-4acc-82fe-cddcc303f189-node-bootstrap-token\") pod \"machine-config-server-rq6qg\" (UID: \"093341b6-a75f-4acc-82fe-cddcc303f189\") " pod="openshift-machine-config-operator/machine-config-server-rq6qg" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.434000 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0c6fa8cc-e2f2-46a1-995d-96a9b76f01fb-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-ql8ns\" (UID: \"0c6fa8cc-e2f2-46a1-995d-96a9b76f01fb\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ql8ns" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.434018 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/17c756cd-c7ba-4efa-850d-7a9aff74099d-config-volume\") pod \"collect-profiles-29323365-465ld\" (UID: \"17c756cd-c7ba-4efa-850d-7a9aff74099d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323365-465ld" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.434039 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mqn2g\" (UniqueName: \"kubernetes.io/projected/88a3511a-30a6-4aa3-99d9-e65e05aaefc8-kube-api-access-mqn2g\") pod \"cluster-image-registry-operator-dc59b4c8b-drj5j\" (UID: \"88a3511a-30a6-4aa3-99d9-e65e05aaefc8\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-drj5j" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.434053 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/f2ba3d51-3014-4233-931b-adaa45c937dd-socket-dir\") pod \"csi-hostpathplugin-9ppnb\" (UID: \"f2ba3d51-3014-4233-931b-adaa45c937dd\") " pod="hostpath-provisioner/csi-hostpathplugin-9ppnb" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.434067 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9675dc45-ac16-4440-a4b1-2c3cfeff2459-config-volume\") pod \"dns-default-8vjpx\" (UID: \"9675dc45-ac16-4440-a4b1-2c3cfeff2459\") " pod="openshift-dns/dns-default-8vjpx" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.434087 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eb03ecb8-df6d-404e-b248-90ca5372ecf9-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-xknld\" (UID: \"eb03ecb8-df6d-404e-b248-90ca5372ecf9\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-xknld" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.434117 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d972c88-073c-4259-bc37-b77d1c0a0bfe-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-2w77p\" (UID: \"9d972c88-073c-4259-bc37-b77d1c0a0bfe\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-2w77p" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.434132 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6f6a533d-1bcb-409b-994e-e4ec71cffaeb-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-q7g9p\" (UID: \"6f6a533d-1bcb-409b-994e-e4ec71cffaeb\") " pod="openshift-marketplace/marketplace-operator-79b997595-q7g9p" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.434161 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nb8sq\" (UniqueName: \"kubernetes.io/projected/33db5790-d9e7-4599-b8c7-7578ccb9940a-kube-api-access-nb8sq\") pod \"olm-operator-6b444d44fb-js7h8\" (UID: \"33db5790-d9e7-4599-b8c7-7578ccb9940a\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-js7h8" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.434179 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/a6d0e458-bd0f-402c-8cad-db6eba1e45ff-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-8mjk7\" (UID: \"a6d0e458-bd0f-402c-8cad-db6eba1e45ff\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-8mjk7" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.434198 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe-registry-certificates\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.434212 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/f2ba3d51-3014-4233-931b-adaa45c937dd-csi-data-dir\") pod \"csi-hostpathplugin-9ppnb\" (UID: \"f2ba3d51-3014-4233-931b-adaa45c937dd\") " pod="hostpath-provisioner/csi-hostpathplugin-9ppnb" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.434228 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b03dc3cf-caec-4c8a-86e3-db542117eef3-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-8544f\" (UID: \"b03dc3cf-caec-4c8a-86e3-db542117eef3\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-8544f" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.434246 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-llvqw\" (UniqueName: \"kubernetes.io/projected/02091d48-2a3f-4efc-a173-fc17fb2f3f9a-kube-api-access-llvqw\") pod \"multus-admission-controller-857f4d67dd-x4fgp\" (UID: \"02091d48-2a3f-4efc-a173-fc17fb2f3f9a\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-x4fgp" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.434263 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/02091d48-2a3f-4efc-a173-fc17fb2f3f9a-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-x4fgp\" (UID: \"02091d48-2a3f-4efc-a173-fc17fb2f3f9a\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-x4fgp" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.434280 4783 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/33db5790-d9e7-4599-b8c7-7578ccb9940a-profile-collector-cert\") pod \"olm-operator-6b444d44fb-js7h8\" (UID: \"33db5790-d9e7-4599-b8c7-7578ccb9940a\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-js7h8" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.434297 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bq79g\" (UniqueName: \"kubernetes.io/projected/9675dc45-ac16-4440-a4b1-2c3cfeff2459-kube-api-access-bq79g\") pod \"dns-default-8vjpx\" (UID: \"9675dc45-ac16-4440-a4b1-2c3cfeff2459\") " pod="openshift-dns/dns-default-8vjpx" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.434322 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qtxtk\" (UniqueName: \"kubernetes.io/projected/1b89cc5b-178f-4c08-b588-979dc2393bae-kube-api-access-qtxtk\") pod \"ingress-operator-5b745b69d9-xgcwj\" (UID: \"1b89cc5b-178f-4c08-b588-979dc2393bae\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xgcwj" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.434337 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b03dc3cf-caec-4c8a-86e3-db542117eef3-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-8544f\" (UID: \"b03dc3cf-caec-4c8a-86e3-db542117eef3\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-8544f" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.434352 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/6f6a533d-1bcb-409b-994e-e4ec71cffaeb-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-q7g9p\" (UID: \"6f6a533d-1bcb-409b-994e-e4ec71cffaeb\") " pod="openshift-marketplace/marketplace-operator-79b997595-q7g9p" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.434393 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mvr7d\" (UniqueName: \"kubernetes.io/projected/17c756cd-c7ba-4efa-850d-7a9aff74099d-kube-api-access-mvr7d\") pod \"collect-profiles-29323365-465ld\" (UID: \"17c756cd-c7ba-4efa-850d-7a9aff74099d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323365-465ld" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.434479 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vs58m\" (UniqueName: \"kubernetes.io/projected/baa575d2-ecb0-4e64-b0fe-361fec92e555-kube-api-access-vs58m\") pod \"controller-manager-879f6c89f-r7v8v\" (UID: \"baa575d2-ecb0-4e64-b0fe-361fec92e555\") " pod="openshift-controller-manager/controller-manager-879f6c89f-r7v8v" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.434498 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/daa53316-d347-4aaf-b33d-9b6f63757502-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-6rrvt\" (UID: \"daa53316-d347-4aaf-b33d-9b6f63757502\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6rrvt" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.434526 4783 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/1d0ded20-7af7-44b2-9bd8-4a6066faa4d5-srv-cert\") pod \"catalog-operator-68c6474976-ncpvc\" (UID: \"1d0ded20-7af7-44b2-9bd8-4a6066faa4d5\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncpvc" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.434546 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/88a3511a-30a6-4aa3-99d9-e65e05aaefc8-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-drj5j\" (UID: \"88a3511a-30a6-4aa3-99d9-e65e05aaefc8\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-drj5j" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.434567 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/1577f6a5-596e-445c-995d-ffe75fe194b0-cert\") pod \"ingress-canary-s7hdh\" (UID: \"1577f6a5-596e-445c-995d-ffe75fe194b0\") " pod="openshift-ingress-canary/ingress-canary-s7hdh" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.434586 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pr7xh\" (UniqueName: \"kubernetes.io/projected/7567048e-a0b0-46e5-b4bf-51180f84b884-kube-api-access-pr7xh\") pod \"router-default-5444994796-zwxct\" (UID: \"7567048e-a0b0-46e5-b4bf-51180f84b884\") " pod="openshift-ingress/router-default-5444994796-zwxct" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.434602 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/17c756cd-c7ba-4efa-850d-7a9aff74099d-secret-volume\") pod \"collect-profiles-29323365-465ld\" (UID: \"17c756cd-c7ba-4efa-850d-7a9aff74099d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323365-465ld" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.434617 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6c1efff0-350e-4626-b854-d818e5b5f7f7-config\") pod \"service-ca-operator-777779d784-lqq4w\" (UID: \"6c1efff0-350e-4626-b854-d818e5b5f7f7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-lqq4w" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.434642 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/9675dc45-ac16-4440-a4b1-2c3cfeff2459-metrics-tls\") pod \"dns-default-8vjpx\" (UID: \"9675dc45-ac16-4440-a4b1-2c3cfeff2459\") " pod="openshift-dns/dns-default-8vjpx" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.434662 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b741b1e1-2c96-4a89-82eb-0a4106dfc6bb-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-8wv52\" (UID: \"b741b1e1-2c96-4a89-82eb-0a4106dfc6bb\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8wv52" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.434679 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/88a3511a-30a6-4aa3-99d9-e65e05aaefc8-bound-sa-token\") 
pod \"cluster-image-registry-operator-dc59b4c8b-drj5j\" (UID: \"88a3511a-30a6-4aa3-99d9-e65e05aaefc8\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-drj5j" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.434694 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z6v54\" (UniqueName: \"kubernetes.io/projected/daa53316-d347-4aaf-b33d-9b6f63757502-kube-api-access-z6v54\") pod \"machine-config-controller-84d6567774-6rrvt\" (UID: \"daa53316-d347-4aaf-b33d-9b6f63757502\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6rrvt" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.434724 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wgcc7\" (UniqueName: \"kubernetes.io/projected/6c1efff0-350e-4626-b854-d818e5b5f7f7-kube-api-access-wgcc7\") pod \"service-ca-operator-777779d784-lqq4w\" (UID: \"6c1efff0-350e-4626-b854-d818e5b5f7f7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-lqq4w" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.434739 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5fnmb\" (UniqueName: \"kubernetes.io/projected/6f6a533d-1bcb-409b-994e-e4ec71cffaeb-kube-api-access-5fnmb\") pod \"marketplace-operator-79b997595-q7g9p\" (UID: \"6f6a533d-1bcb-409b-994e-e4ec71cffaeb\") " pod="openshift-marketplace/marketplace-operator-79b997595-q7g9p" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.434772 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/73cf839f-ec02-4db7-b640-e1c0783fafc9-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-mktxk\" (UID: \"73cf839f-ec02-4db7-b640-e1c0783fafc9\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mktxk" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.434789 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/0ee74d4b-8751-4089-a0e6-a99132950452-signing-key\") pod \"service-ca-9c57cc56f-9m8m9\" (UID: \"0ee74d4b-8751-4089-a0e6-a99132950452\") " pod="openshift-service-ca/service-ca-9c57cc56f-9m8m9" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.435114 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe-installation-pull-secrets\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.435147 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/f2ba3d51-3014-4233-931b-adaa45c937dd-plugins-dir\") pod \"csi-hostpathplugin-9ppnb\" (UID: \"f2ba3d51-3014-4233-931b-adaa45c937dd\") " pod="hostpath-provisioner/csi-hostpathplugin-9ppnb" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.435170 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/88a3511a-30a6-4aa3-99d9-e65e05aaefc8-image-registry-operator-tls\") pod 
\"cluster-image-registry-operator-dc59b4c8b-drj5j\" (UID: \"88a3511a-30a6-4aa3-99d9-e65e05aaefc8\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-drj5j" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.435187 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7567048e-a0b0-46e5-b4bf-51180f84b884-metrics-certs\") pod \"router-default-5444994796-zwxct\" (UID: \"7567048e-a0b0-46e5-b4bf-51180f84b884\") " pod="openshift-ingress/router-default-5444994796-zwxct" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.435225 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c6fa8cc-e2f2-46a1-995d-96a9b76f01fb-config\") pod \"authentication-operator-69f744f599-ql8ns\" (UID: \"0c6fa8cc-e2f2-46a1-995d-96a9b76f01fb\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ql8ns" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.435814 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eb03ecb8-df6d-404e-b248-90ca5372ecf9-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-xknld\" (UID: \"eb03ecb8-df6d-404e-b248-90ca5372ecf9\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-xknld" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.435917 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1b89cc5b-178f-4c08-b588-979dc2393bae-trusted-ca\") pod \"ingress-operator-5b745b69d9-xgcwj\" (UID: \"1b89cc5b-178f-4c08-b588-979dc2393bae\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xgcwj" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.435949 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/b9581e6c-3a17-4a11-bf2e-c24f90ececca-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-lmvhj\" (UID: \"b9581e6c-3a17-4a11-bf2e-c24f90ececca\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-lmvhj" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.435971 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/daa53316-d347-4aaf-b33d-9b6f63757502-proxy-tls\") pod \"machine-config-controller-84d6567774-6rrvt\" (UID: \"daa53316-d347-4aaf-b33d-9b6f63757502\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6rrvt" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.435986 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d507ecac-9761-4886-8acd-daff78a5d360-webhook-cert\") pod \"packageserver-d55dfcdfc-7qsc9\" (UID: \"d507ecac-9761-4886-8acd-daff78a5d360\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7qsc9" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.436225 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/33db5790-d9e7-4599-b8c7-7578ccb9940a-srv-cert\") pod \"olm-operator-6b444d44fb-js7h8\" (UID: \"33db5790-d9e7-4599-b8c7-7578ccb9940a\") " 
pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-js7h8" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.436256 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9frm2\" (UniqueName: \"kubernetes.io/projected/1d0ded20-7af7-44b2-9bd8-4a6066faa4d5-kube-api-access-9frm2\") pod \"catalog-operator-68c6474976-ncpvc\" (UID: \"1d0ded20-7af7-44b2-9bd8-4a6066faa4d5\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncpvc" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.436282 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/093341b6-a75f-4acc-82fe-cddcc303f189-certs\") pod \"machine-config-server-rq6qg\" (UID: \"093341b6-a75f-4acc-82fe-cddcc303f189\") " pod="openshift-machine-config-operator/machine-config-server-rq6qg" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.436301 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vbvxw\" (UniqueName: \"kubernetes.io/projected/a6d0e458-bd0f-402c-8cad-db6eba1e45ff-kube-api-access-vbvxw\") pod \"control-plane-machine-set-operator-78cbb6b69f-8mjk7\" (UID: \"a6d0e458-bd0f-402c-8cad-db6eba1e45ff\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-8mjk7" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.436320 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fd44s\" (UniqueName: \"kubernetes.io/projected/1577f6a5-596e-445c-995d-ffe75fe194b0-kube-api-access-fd44s\") pod \"ingress-canary-s7hdh\" (UID: \"1577f6a5-596e-445c-995d-ffe75fe194b0\") " pod="openshift-ingress-canary/ingress-canary-s7hdh" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.436340 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fsgzp\" (UniqueName: \"kubernetes.io/projected/2542c48f-ba7c-4077-9aaa-63fb5ddb0c1a-kube-api-access-fsgzp\") pod \"migrator-59844c95c7-t4hbs\" (UID: \"2542c48f-ba7c-4077-9aaa-63fb5ddb0c1a\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-t4hbs" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.436389 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe-registry-certificates\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.436683 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/7567048e-a0b0-46e5-b4bf-51180f84b884-stats-auth\") pod \"router-default-5444994796-zwxct\" (UID: \"7567048e-a0b0-46e5-b4bf-51180f84b884\") " pod="openshift-ingress/router-default-5444994796-zwxct" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.437047 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/1b89cc5b-178f-4c08-b588-979dc2393bae-metrics-tls\") pod \"ingress-operator-5b745b69d9-xgcwj\" (UID: \"1b89cc5b-178f-4c08-b588-979dc2393bae\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xgcwj" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 
10:55:10.437077 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7567048e-a0b0-46e5-b4bf-51180f84b884-service-ca-bundle\") pod \"router-default-5444994796-zwxct\" (UID: \"7567048e-a0b0-46e5-b4bf-51180f84b884\") " pod="openshift-ingress/router-default-5444994796-zwxct" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.437126 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d507ecac-9761-4886-8acd-daff78a5d360-apiservice-cert\") pod \"packageserver-d55dfcdfc-7qsc9\" (UID: \"d507ecac-9761-4886-8acd-daff78a5d360\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7qsc9" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.437259 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4cfnv\" (UniqueName: \"kubernetes.io/projected/a053c888-10fd-4b11-bc05-963d0a7ab8ad-kube-api-access-4cfnv\") pod \"package-server-manager-789f6589d5-rfpcx\" (UID: \"a053c888-10fd-4b11-bc05-963d0a7ab8ad\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rfpcx" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.437284 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/f2ba3d51-3014-4233-931b-adaa45c937dd-registration-dir\") pod \"csi-hostpathplugin-9ppnb\" (UID: \"f2ba3d51-3014-4233-931b-adaa45c937dd\") " pod="hostpath-provisioner/csi-hostpathplugin-9ppnb" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.437314 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe-ca-trust-extracted\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.437331 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/a053c888-10fd-4b11-bc05-963d0a7ab8ad-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-rfpcx\" (UID: \"a053c888-10fd-4b11-bc05-963d0a7ab8ad\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rfpcx" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.437360 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.437376 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n97rr\" (UniqueName: \"kubernetes.io/projected/eb03ecb8-df6d-404e-b248-90ca5372ecf9-kube-api-access-n97rr\") pod \"openshift-controller-manager-operator-756b6f6bc6-xknld\" (UID: \"eb03ecb8-df6d-404e-b248-90ca5372ecf9\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-xknld" Oct 02 10:55:10 crc 
kubenswrapper[4783]: I1002 10:55:10.437394 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6c1efff0-350e-4626-b854-d818e5b5f7f7-serving-cert\") pod \"service-ca-operator-777779d784-lqq4w\" (UID: \"6c1efff0-350e-4626-b854-d818e5b5f7f7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-lqq4w" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.437426 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/0ee74d4b-8751-4089-a0e6-a99132950452-signing-cabundle\") pod \"service-ca-9c57cc56f-9m8m9\" (UID: \"0ee74d4b-8751-4089-a0e6-a99132950452\") " pod="openshift-service-ca/service-ca-9c57cc56f-9m8m9" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.437467 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dft9f\" (UniqueName: \"kubernetes.io/projected/700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe-kube-api-access-dft9f\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.437483 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kz4wl\" (UniqueName: \"kubernetes.io/projected/f2ba3d51-3014-4233-931b-adaa45c937dd-kube-api-access-kz4wl\") pod \"csi-hostpathplugin-9ppnb\" (UID: \"f2ba3d51-3014-4233-931b-adaa45c937dd\") " pod="hostpath-provisioner/csi-hostpathplugin-9ppnb" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.437511 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/1b89cc5b-178f-4c08-b588-979dc2393bae-bound-sa-token\") pod \"ingress-operator-5b745b69d9-xgcwj\" (UID: \"1b89cc5b-178f-4c08-b588-979dc2393bae\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xgcwj" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.437657 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dp448\" (UniqueName: \"kubernetes.io/projected/b9581e6c-3a17-4a11-bf2e-c24f90ececca-kube-api-access-dp448\") pod \"cluster-samples-operator-665b6dd947-lmvhj\" (UID: \"b9581e6c-3a17-4a11-bf2e-c24f90ececca\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-lmvhj" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.437686 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dh5mm\" (UniqueName: \"kubernetes.io/projected/0ee74d4b-8751-4089-a0e6-a99132950452-kube-api-access-dh5mm\") pod \"service-ca-9c57cc56f-9m8m9\" (UID: \"0ee74d4b-8751-4089-a0e6-a99132950452\") " pod="openshift-service-ca/service-ca-9c57cc56f-9m8m9" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.437709 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/d507ecac-9761-4886-8acd-daff78a5d360-tmpfs\") pod \"packageserver-d55dfcdfc-7qsc9\" (UID: \"d507ecac-9761-4886-8acd-daff78a5d360\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7qsc9" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.437727 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe-trusted-ca\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.437744 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/baa575d2-ecb0-4e64-b0fe-361fec92e555-config\") pod \"controller-manager-879f6c89f-r7v8v\" (UID: \"baa575d2-ecb0-4e64-b0fe-361fec92e555\") " pod="openshift-controller-manager/controller-manager-879f6c89f-r7v8v" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.437756 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/7567048e-a0b0-46e5-b4bf-51180f84b884-service-ca-bundle\") pod \"router-default-5444994796-zwxct\" (UID: \"7567048e-a0b0-46e5-b4bf-51180f84b884\") " pod="openshift-ingress/router-default-5444994796-zwxct" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.437762 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/baa575d2-ecb0-4e64-b0fe-361fec92e555-serving-cert\") pod \"controller-manager-879f6c89f-r7v8v\" (UID: \"baa575d2-ecb0-4e64-b0fe-361fec92e555\") " pod="openshift-controller-manager/controller-manager-879f6c89f-r7v8v" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.437798 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b741b1e1-2c96-4a89-82eb-0a4106dfc6bb-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-8wv52\" (UID: \"b741b1e1-2c96-4a89-82eb-0a4106dfc6bb\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8wv52" Oct 02 10:55:10 crc kubenswrapper[4783]: E1002 10:55:10.437830 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:10.937817026 +0000 UTC m=+144.254011287 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.437845 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d972c88-073c-4259-bc37-b77d1c0a0bfe-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-2w77p\" (UID: \"9d972c88-073c-4259-bc37-b77d1c0a0bfe\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-2w77p" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.437850 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-x9scv" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.438216 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/73cf839f-ec02-4db7-b640-e1c0783fafc9-config\") pod \"kube-apiserver-operator-766d6c64bb-mktxk\" (UID: \"73cf839f-ec02-4db7-b640-e1c0783fafc9\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mktxk" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.438271 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe-ca-trust-extracted\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.438596 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/baa575d2-ecb0-4e64-b0fe-361fec92e555-client-ca\") pod \"controller-manager-879f6c89f-r7v8v\" (UID: \"baa575d2-ecb0-4e64-b0fe-361fec92e555\") " pod="openshift-controller-manager/controller-manager-879f6c89f-r7v8v" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.438675 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/73cf839f-ec02-4db7-b640-e1c0783fafc9-config\") pod \"kube-apiserver-operator-766d6c64bb-mktxk\" (UID: \"73cf839f-ec02-4db7-b640-e1c0783fafc9\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mktxk" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.438763 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b741b1e1-2c96-4a89-82eb-0a4106dfc6bb-config\") pod \"kube-controller-manager-operator-78b949d7b-8wv52\" (UID: \"b741b1e1-2c96-4a89-82eb-0a4106dfc6bb\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8wv52" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.439272 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/baa575d2-ecb0-4e64-b0fe-361fec92e555-client-ca\") pod \"controller-manager-879f6c89f-r7v8v\" (UID: \"baa575d2-ecb0-4e64-b0fe-361fec92e555\") " pod="openshift-controller-manager/controller-manager-879f6c89f-r7v8v" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.439298 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b741b1e1-2c96-4a89-82eb-0a4106dfc6bb-config\") pod \"kube-controller-manager-operator-78b949d7b-8wv52\" (UID: \"b741b1e1-2c96-4a89-82eb-0a4106dfc6bb\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8wv52" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.439458 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zt5xd\" (UniqueName: \"kubernetes.io/projected/0c6fa8cc-e2f2-46a1-995d-96a9b76f01fb-kube-api-access-zt5xd\") pod \"authentication-operator-69f744f599-ql8ns\" (UID: \"0c6fa8cc-e2f2-46a1-995d-96a9b76f01fb\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ql8ns" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.439485 4783 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe-bound-sa-token\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.439505 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0c6fa8cc-e2f2-46a1-995d-96a9b76f01fb-serving-cert\") pod \"authentication-operator-69f744f599-ql8ns\" (UID: \"0c6fa8cc-e2f2-46a1-995d-96a9b76f01fb\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ql8ns" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.439522 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0c6fa8cc-e2f2-46a1-995d-96a9b76f01fb-service-ca-bundle\") pod \"authentication-operator-69f744f599-ql8ns\" (UID: \"0c6fa8cc-e2f2-46a1-995d-96a9b76f01fb\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ql8ns" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.439551 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe-registry-tls\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.440024 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/02091d48-2a3f-4efc-a173-fc17fb2f3f9a-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-x4fgp\" (UID: \"02091d48-2a3f-4efc-a173-fc17fb2f3f9a\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-x4fgp" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.440365 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe-trusted-ca\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.440674 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b741b1e1-2c96-4a89-82eb-0a4106dfc6bb-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-8wv52\" (UID: \"b741b1e1-2c96-4a89-82eb-0a4106dfc6bb\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8wv52" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.440804 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/eb03ecb8-df6d-404e-b248-90ca5372ecf9-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-xknld\" (UID: \"eb03ecb8-df6d-404e-b248-90ca5372ecf9\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-xknld" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.440843 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/b03dc3cf-caec-4c8a-86e3-db542117eef3-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-8544f\" (UID: \"b03dc3cf-caec-4c8a-86e3-db542117eef3\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-8544f" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.440890 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/baa575d2-ecb0-4e64-b0fe-361fec92e555-serving-cert\") pod \"controller-manager-879f6c89f-r7v8v\" (UID: \"baa575d2-ecb0-4e64-b0fe-361fec92e555\") " pod="openshift-controller-manager/controller-manager-879f6c89f-r7v8v" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.441267 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/baa575d2-ecb0-4e64-b0fe-361fec92e555-config\") pod \"controller-manager-879f6c89f-r7v8v\" (UID: \"baa575d2-ecb0-4e64-b0fe-361fec92e555\") " pod="openshift-controller-manager/controller-manager-879f6c89f-r7v8v" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.441346 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dsxwv\" (UniqueName: \"kubernetes.io/projected/9d972c88-073c-4259-bc37-b77d1c0a0bfe-kube-api-access-dsxwv\") pod \"kube-storage-version-migrator-operator-b67b599dd-2w77p\" (UID: \"9d972c88-073c-4259-bc37-b77d1c0a0bfe\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-2w77p" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.441484 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/baa575d2-ecb0-4e64-b0fe-361fec92e555-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-r7v8v\" (UID: \"baa575d2-ecb0-4e64-b0fe-361fec92e555\") " pod="openshift-controller-manager/controller-manager-879f6c89f-r7v8v" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.442626 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/88a3511a-30a6-4aa3-99d9-e65e05aaefc8-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-drj5j\" (UID: \"88a3511a-30a6-4aa3-99d9-e65e05aaefc8\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-drj5j" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.442780 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/b9581e6c-3a17-4a11-bf2e-c24f90ececca-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-lmvhj\" (UID: \"b9581e6c-3a17-4a11-bf2e-c24f90ececca\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-lmvhj" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.442854 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/eb03ecb8-df6d-404e-b248-90ca5372ecf9-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-xknld\" (UID: \"eb03ecb8-df6d-404e-b248-90ca5372ecf9\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-xknld" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.443025 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: 
\"kubernetes.io/secret/700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe-installation-pull-secrets\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.443340 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/73cf839f-ec02-4db7-b640-e1c0783fafc9-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-mktxk\" (UID: \"73cf839f-ec02-4db7-b640-e1c0783fafc9\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mktxk" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.443490 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/baa575d2-ecb0-4e64-b0fe-361fec92e555-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-r7v8v\" (UID: \"baa575d2-ecb0-4e64-b0fe-361fec92e555\") " pod="openshift-controller-manager/controller-manager-879f6c89f-r7v8v" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.457745 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/7567048e-a0b0-46e5-b4bf-51180f84b884-default-certificate\") pod \"router-default-5444994796-zwxct\" (UID: \"7567048e-a0b0-46e5-b4bf-51180f84b884\") " pod="openshift-ingress/router-default-5444994796-zwxct" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.458037 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/7567048e-a0b0-46e5-b4bf-51180f84b884-metrics-certs\") pod \"router-default-5444994796-zwxct\" (UID: \"7567048e-a0b0-46e5-b4bf-51180f84b884\") " pod="openshift-ingress/router-default-5444994796-zwxct" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.458759 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0c6fa8cc-e2f2-46a1-995d-96a9b76f01fb-config\") pod \"authentication-operator-69f744f599-ql8ns\" (UID: \"0c6fa8cc-e2f2-46a1-995d-96a9b76f01fb\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ql8ns" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.459045 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0c6fa8cc-e2f2-46a1-995d-96a9b76f01fb-serving-cert\") pod \"authentication-operator-69f744f599-ql8ns\" (UID: \"0c6fa8cc-e2f2-46a1-995d-96a9b76f01fb\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ql8ns" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.459730 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/88a3511a-30a6-4aa3-99d9-e65e05aaefc8-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-drj5j\" (UID: \"88a3511a-30a6-4aa3-99d9-e65e05aaefc8\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-drj5j" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.459846 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/7567048e-a0b0-46e5-b4bf-51180f84b884-stats-auth\") pod \"router-default-5444994796-zwxct\" (UID: \"7567048e-a0b0-46e5-b4bf-51180f84b884\") " pod="openshift-ingress/router-default-5444994796-zwxct" Oct 02 10:55:10 crc 
kubenswrapper[4783]: I1002 10:55:10.461338 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0c6fa8cc-e2f2-46a1-995d-96a9b76f01fb-service-ca-bundle\") pod \"authentication-operator-69f744f599-ql8ns\" (UID: \"0c6fa8cc-e2f2-46a1-995d-96a9b76f01fb\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ql8ns" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.461930 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0c6fa8cc-e2f2-46a1-995d-96a9b76f01fb-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-ql8ns\" (UID: \"0c6fa8cc-e2f2-46a1-995d-96a9b76f01fb\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ql8ns" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.462361 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/1b89cc5b-178f-4c08-b588-979dc2393bae-trusted-ca\") pod \"ingress-operator-5b745b69d9-xgcwj\" (UID: \"1b89cc5b-178f-4c08-b588-979dc2393bae\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xgcwj" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.466920 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/1b89cc5b-178f-4c08-b588-979dc2393bae-metrics-tls\") pod \"ingress-operator-5b745b69d9-xgcwj\" (UID: \"1b89cc5b-178f-4c08-b588-979dc2393bae\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xgcwj" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.476326 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mqn2g\" (UniqueName: \"kubernetes.io/projected/88a3511a-30a6-4aa3-99d9-e65e05aaefc8-kube-api-access-mqn2g\") pod \"cluster-image-registry-operator-dc59b4c8b-drj5j\" (UID: \"88a3511a-30a6-4aa3-99d9-e65e05aaefc8\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-drj5j" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.493668 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vs58m\" (UniqueName: \"kubernetes.io/projected/baa575d2-ecb0-4e64-b0fe-361fec92e555-kube-api-access-vs58m\") pod \"controller-manager-879f6c89f-r7v8v\" (UID: \"baa575d2-ecb0-4e64-b0fe-361fec92e555\") " pod="openshift-controller-manager/controller-manager-879f6c89f-r7v8v" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.513937 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qtxtk\" (UniqueName: \"kubernetes.io/projected/1b89cc5b-178f-4c08-b588-979dc2393bae-kube-api-access-qtxtk\") pod \"ingress-operator-5b745b69d9-xgcwj\" (UID: \"1b89cc5b-178f-4c08-b588-979dc2393bae\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xgcwj" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.537145 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/73cf839f-ec02-4db7-b640-e1c0783fafc9-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-mktxk\" (UID: \"73cf839f-ec02-4db7-b640-e1c0783fafc9\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mktxk" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.545529 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.545670 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d972c88-073c-4259-bc37-b77d1c0a0bfe-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-2w77p\" (UID: \"9d972c88-073c-4259-bc37-b77d1c0a0bfe\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-2w77p" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.545690 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6f6a533d-1bcb-409b-994e-e4ec71cffaeb-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-q7g9p\" (UID: \"6f6a533d-1bcb-409b-994e-e4ec71cffaeb\") " pod="openshift-marketplace/marketplace-operator-79b997595-q7g9p" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.545711 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nb8sq\" (UniqueName: \"kubernetes.io/projected/33db5790-d9e7-4599-b8c7-7578ccb9940a-kube-api-access-nb8sq\") pod \"olm-operator-6b444d44fb-js7h8\" (UID: \"33db5790-d9e7-4599-b8c7-7578ccb9940a\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-js7h8" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.545731 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/a6d0e458-bd0f-402c-8cad-db6eba1e45ff-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-8mjk7\" (UID: \"a6d0e458-bd0f-402c-8cad-db6eba1e45ff\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-8mjk7" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.545754 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/f2ba3d51-3014-4233-931b-adaa45c937dd-csi-data-dir\") pod \"csi-hostpathplugin-9ppnb\" (UID: \"f2ba3d51-3014-4233-931b-adaa45c937dd\") " pod="hostpath-provisioner/csi-hostpathplugin-9ppnb" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.545769 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b03dc3cf-caec-4c8a-86e3-db542117eef3-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-8544f\" (UID: \"b03dc3cf-caec-4c8a-86e3-db542117eef3\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-8544f" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.545794 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/33db5790-d9e7-4599-b8c7-7578ccb9940a-profile-collector-cert\") pod \"olm-operator-6b444d44fb-js7h8\" (UID: \"33db5790-d9e7-4599-b8c7-7578ccb9940a\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-js7h8" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.545809 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bq79g\" (UniqueName: 
\"kubernetes.io/projected/9675dc45-ac16-4440-a4b1-2c3cfeff2459-kube-api-access-bq79g\") pod \"dns-default-8vjpx\" (UID: \"9675dc45-ac16-4440-a4b1-2c3cfeff2459\") " pod="openshift-dns/dns-default-8vjpx" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.545825 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b03dc3cf-caec-4c8a-86e3-db542117eef3-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-8544f\" (UID: \"b03dc3cf-caec-4c8a-86e3-db542117eef3\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-8544f" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.545841 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/6f6a533d-1bcb-409b-994e-e4ec71cffaeb-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-q7g9p\" (UID: \"6f6a533d-1bcb-409b-994e-e4ec71cffaeb\") " pod="openshift-marketplace/marketplace-operator-79b997595-q7g9p" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.545857 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mvr7d\" (UniqueName: \"kubernetes.io/projected/17c756cd-c7ba-4efa-850d-7a9aff74099d-kube-api-access-mvr7d\") pod \"collect-profiles-29323365-465ld\" (UID: \"17c756cd-c7ba-4efa-850d-7a9aff74099d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323365-465ld" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.545879 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/daa53316-d347-4aaf-b33d-9b6f63757502-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-6rrvt\" (UID: \"daa53316-d347-4aaf-b33d-9b6f63757502\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6rrvt" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.545906 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/1d0ded20-7af7-44b2-9bd8-4a6066faa4d5-srv-cert\") pod \"catalog-operator-68c6474976-ncpvc\" (UID: \"1d0ded20-7af7-44b2-9bd8-4a6066faa4d5\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncpvc" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.545924 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/1577f6a5-596e-445c-995d-ffe75fe194b0-cert\") pod \"ingress-canary-s7hdh\" (UID: \"1577f6a5-596e-445c-995d-ffe75fe194b0\") " pod="openshift-ingress-canary/ingress-canary-s7hdh" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.545945 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/17c756cd-c7ba-4efa-850d-7a9aff74099d-secret-volume\") pod \"collect-profiles-29323365-465ld\" (UID: \"17c756cd-c7ba-4efa-850d-7a9aff74099d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323365-465ld" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.545962 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6c1efff0-350e-4626-b854-d818e5b5f7f7-config\") pod \"service-ca-operator-777779d784-lqq4w\" (UID: \"6c1efff0-350e-4626-b854-d818e5b5f7f7\") " 
pod="openshift-service-ca-operator/service-ca-operator-777779d784-lqq4w" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.545975 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/9675dc45-ac16-4440-a4b1-2c3cfeff2459-metrics-tls\") pod \"dns-default-8vjpx\" (UID: \"9675dc45-ac16-4440-a4b1-2c3cfeff2459\") " pod="openshift-dns/dns-default-8vjpx" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.545995 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z6v54\" (UniqueName: \"kubernetes.io/projected/daa53316-d347-4aaf-b33d-9b6f63757502-kube-api-access-z6v54\") pod \"machine-config-controller-84d6567774-6rrvt\" (UID: \"daa53316-d347-4aaf-b33d-9b6f63757502\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6rrvt" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.546009 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wgcc7\" (UniqueName: \"kubernetes.io/projected/6c1efff0-350e-4626-b854-d818e5b5f7f7-kube-api-access-wgcc7\") pod \"service-ca-operator-777779d784-lqq4w\" (UID: \"6c1efff0-350e-4626-b854-d818e5b5f7f7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-lqq4w" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.546024 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5fnmb\" (UniqueName: \"kubernetes.io/projected/6f6a533d-1bcb-409b-994e-e4ec71cffaeb-kube-api-access-5fnmb\") pod \"marketplace-operator-79b997595-q7g9p\" (UID: \"6f6a533d-1bcb-409b-994e-e4ec71cffaeb\") " pod="openshift-marketplace/marketplace-operator-79b997595-q7g9p" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.546046 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/0ee74d4b-8751-4089-a0e6-a99132950452-signing-key\") pod \"service-ca-9c57cc56f-9m8m9\" (UID: \"0ee74d4b-8751-4089-a0e6-a99132950452\") " pod="openshift-service-ca/service-ca-9c57cc56f-9m8m9" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.546074 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/f2ba3d51-3014-4233-931b-adaa45c937dd-plugins-dir\") pod \"csi-hostpathplugin-9ppnb\" (UID: \"f2ba3d51-3014-4233-931b-adaa45c937dd\") " pod="hostpath-provisioner/csi-hostpathplugin-9ppnb" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.546099 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/daa53316-d347-4aaf-b33d-9b6f63757502-proxy-tls\") pod \"machine-config-controller-84d6567774-6rrvt\" (UID: \"daa53316-d347-4aaf-b33d-9b6f63757502\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6rrvt" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.546113 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d507ecac-9761-4886-8acd-daff78a5d360-webhook-cert\") pod \"packageserver-d55dfcdfc-7qsc9\" (UID: \"d507ecac-9761-4886-8acd-daff78a5d360\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7qsc9" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.546128 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: 
\"kubernetes.io/secret/33db5790-d9e7-4599-b8c7-7578ccb9940a-srv-cert\") pod \"olm-operator-6b444d44fb-js7h8\" (UID: \"33db5790-d9e7-4599-b8c7-7578ccb9940a\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-js7h8" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.546143 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9frm2\" (UniqueName: \"kubernetes.io/projected/1d0ded20-7af7-44b2-9bd8-4a6066faa4d5-kube-api-access-9frm2\") pod \"catalog-operator-68c6474976-ncpvc\" (UID: \"1d0ded20-7af7-44b2-9bd8-4a6066faa4d5\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncpvc" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.546161 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/093341b6-a75f-4acc-82fe-cddcc303f189-certs\") pod \"machine-config-server-rq6qg\" (UID: \"093341b6-a75f-4acc-82fe-cddcc303f189\") " pod="openshift-machine-config-operator/machine-config-server-rq6qg" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.546177 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vbvxw\" (UniqueName: \"kubernetes.io/projected/a6d0e458-bd0f-402c-8cad-db6eba1e45ff-kube-api-access-vbvxw\") pod \"control-plane-machine-set-operator-78cbb6b69f-8mjk7\" (UID: \"a6d0e458-bd0f-402c-8cad-db6eba1e45ff\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-8mjk7" Oct 02 10:55:10 crc kubenswrapper[4783]: E1002 10:55:10.547843 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:11.046634457 +0000 UTC m=+144.362828708 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.547952 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fd44s\" (UniqueName: \"kubernetes.io/projected/1577f6a5-596e-445c-995d-ffe75fe194b0-kube-api-access-fd44s\") pod \"ingress-canary-s7hdh\" (UID: \"1577f6a5-596e-445c-995d-ffe75fe194b0\") " pod="openshift-ingress-canary/ingress-canary-s7hdh" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.547995 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fsgzp\" (UniqueName: \"kubernetes.io/projected/2542c48f-ba7c-4077-9aaa-63fb5ddb0c1a-kube-api-access-fsgzp\") pod \"migrator-59844c95c7-t4hbs\" (UID: \"2542c48f-ba7c-4077-9aaa-63fb5ddb0c1a\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-t4hbs" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.548033 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d507ecac-9761-4886-8acd-daff78a5d360-apiservice-cert\") pod \"packageserver-d55dfcdfc-7qsc9\" (UID: \"d507ecac-9761-4886-8acd-daff78a5d360\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7qsc9" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.548058 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4cfnv\" (UniqueName: \"kubernetes.io/projected/a053c888-10fd-4b11-bc05-963d0a7ab8ad-kube-api-access-4cfnv\") pod \"package-server-manager-789f6589d5-rfpcx\" (UID: \"a053c888-10fd-4b11-bc05-963d0a7ab8ad\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rfpcx" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.548080 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/f2ba3d51-3014-4233-931b-adaa45c937dd-registration-dir\") pod \"csi-hostpathplugin-9ppnb\" (UID: \"f2ba3d51-3014-4233-931b-adaa45c937dd\") " pod="hostpath-provisioner/csi-hostpathplugin-9ppnb" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.548118 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/a053c888-10fd-4b11-bc05-963d0a7ab8ad-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-rfpcx\" (UID: \"a053c888-10fd-4b11-bc05-963d0a7ab8ad\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rfpcx" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.548150 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.548183 4783 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6c1efff0-350e-4626-b854-d818e5b5f7f7-serving-cert\") pod \"service-ca-operator-777779d784-lqq4w\" (UID: \"6c1efff0-350e-4626-b854-d818e5b5f7f7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-lqq4w" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.548212 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/0ee74d4b-8751-4089-a0e6-a99132950452-signing-cabundle\") pod \"service-ca-9c57cc56f-9m8m9\" (UID: \"0ee74d4b-8751-4089-a0e6-a99132950452\") " pod="openshift-service-ca/service-ca-9c57cc56f-9m8m9" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.548785 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/f2ba3d51-3014-4233-931b-adaa45c937dd-plugins-dir\") pod \"csi-hostpathplugin-9ppnb\" (UID: \"f2ba3d51-3014-4233-931b-adaa45c937dd\") " pod="hostpath-provisioner/csi-hostpathplugin-9ppnb" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.552713 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/093341b6-a75f-4acc-82fe-cddcc303f189-certs\") pod \"machine-config-server-rq6qg\" (UID: \"093341b6-a75f-4acc-82fe-cddcc303f189\") " pod="openshift-machine-config-operator/machine-config-server-rq6qg" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.552943 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/6f6a533d-1bcb-409b-994e-e4ec71cffaeb-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-q7g9p\" (UID: \"6f6a533d-1bcb-409b-994e-e4ec71cffaeb\") " pod="openshift-marketplace/marketplace-operator-79b997595-q7g9p" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.553015 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kz4wl\" (UniqueName: \"kubernetes.io/projected/f2ba3d51-3014-4233-931b-adaa45c937dd-kube-api-access-kz4wl\") pod \"csi-hostpathplugin-9ppnb\" (UID: \"f2ba3d51-3014-4233-931b-adaa45c937dd\") " pod="hostpath-provisioner/csi-hostpathplugin-9ppnb" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.553060 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dh5mm\" (UniqueName: \"kubernetes.io/projected/0ee74d4b-8751-4089-a0e6-a99132950452-kube-api-access-dh5mm\") pod \"service-ca-9c57cc56f-9m8m9\" (UID: \"0ee74d4b-8751-4089-a0e6-a99132950452\") " pod="openshift-service-ca/service-ca-9c57cc56f-9m8m9" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.553083 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/d507ecac-9761-4886-8acd-daff78a5d360-tmpfs\") pod \"packageserver-d55dfcdfc-7qsc9\" (UID: \"d507ecac-9761-4886-8acd-daff78a5d360\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7qsc9" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.553116 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d972c88-073c-4259-bc37-b77d1c0a0bfe-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-2w77p\" (UID: \"9d972c88-073c-4259-bc37-b77d1c0a0bfe\") " 
pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-2w77p" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.553246 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b03dc3cf-caec-4c8a-86e3-db542117eef3-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-8544f\" (UID: \"b03dc3cf-caec-4c8a-86e3-db542117eef3\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-8544f" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.554226 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d507ecac-9761-4886-8acd-daff78a5d360-apiservice-cert\") pod \"packageserver-d55dfcdfc-7qsc9\" (UID: \"d507ecac-9761-4886-8acd-daff78a5d360\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7qsc9" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.554394 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/1d0ded20-7af7-44b2-9bd8-4a6066faa4d5-srv-cert\") pod \"catalog-operator-68c6474976-ncpvc\" (UID: \"1d0ded20-7af7-44b2-9bd8-4a6066faa4d5\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncpvc" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.554474 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/f2ba3d51-3014-4233-931b-adaa45c937dd-csi-data-dir\") pod \"csi-hostpathplugin-9ppnb\" (UID: \"f2ba3d51-3014-4233-931b-adaa45c937dd\") " pod="hostpath-provisioner/csi-hostpathplugin-9ppnb" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.555069 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/daa53316-d347-4aaf-b33d-9b6f63757502-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-6rrvt\" (UID: \"daa53316-d347-4aaf-b33d-9b6f63757502\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6rrvt" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.555513 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/daa53316-d347-4aaf-b33d-9b6f63757502-proxy-tls\") pod \"machine-config-controller-84d6567774-6rrvt\" (UID: \"daa53316-d347-4aaf-b33d-9b6f63757502\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6rrvt" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.555820 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/f2ba3d51-3014-4233-931b-adaa45c937dd-registration-dir\") pod \"csi-hostpathplugin-9ppnb\" (UID: \"f2ba3d51-3014-4233-931b-adaa45c937dd\") " pod="hostpath-provisioner/csi-hostpathplugin-9ppnb" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.556214 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/1577f6a5-596e-445c-995d-ffe75fe194b0-cert\") pod \"ingress-canary-s7hdh\" (UID: \"1577f6a5-596e-445c-995d-ffe75fe194b0\") " pod="openshift-ingress-canary/ingress-canary-s7hdh" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.556502 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: 
\"kubernetes.io/secret/9675dc45-ac16-4440-a4b1-2c3cfeff2459-metrics-tls\") pod \"dns-default-8vjpx\" (UID: \"9675dc45-ac16-4440-a4b1-2c3cfeff2459\") " pod="openshift-dns/dns-default-8vjpx" Oct 02 10:55:10 crc kubenswrapper[4783]: E1002 10:55:10.556598 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:11.056565228 +0000 UTC m=+144.372759489 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.557561 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d972c88-073c-4259-bc37-b77d1c0a0bfe-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-2w77p\" (UID: \"9d972c88-073c-4259-bc37-b77d1c0a0bfe\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-2w77p" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.558155 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/0ee74d4b-8751-4089-a0e6-a99132950452-signing-cabundle\") pod \"service-ca-9c57cc56f-9m8m9\" (UID: \"0ee74d4b-8751-4089-a0e6-a99132950452\") " pod="openshift-service-ca/service-ca-9c57cc56f-9m8m9" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.558505 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/d507ecac-9761-4886-8acd-daff78a5d360-tmpfs\") pod \"packageserver-d55dfcdfc-7qsc9\" (UID: \"d507ecac-9761-4886-8acd-daff78a5d360\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7qsc9" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.558542 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/33db5790-d9e7-4599-b8c7-7578ccb9940a-profile-collector-cert\") pod \"olm-operator-6b444d44fb-js7h8\" (UID: \"33db5790-d9e7-4599-b8c7-7578ccb9940a\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-js7h8" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.557383 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6c1efff0-350e-4626-b854-d818e5b5f7f7-serving-cert\") pod \"service-ca-operator-777779d784-lqq4w\" (UID: \"6c1efff0-350e-4626-b854-d818e5b5f7f7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-lqq4w" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.559397 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6c1efff0-350e-4626-b854-d818e5b5f7f7-config\") pod \"service-ca-operator-777779d784-lqq4w\" (UID: \"6c1efff0-350e-4626-b854-d818e5b5f7f7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-lqq4w" Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.559490 
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.559666 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b03dc3cf-caec-4c8a-86e3-db542117eef3-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-8544f\" (UID: \"b03dc3cf-caec-4c8a-86e3-db542117eef3\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-8544f"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.559711 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s64j4\" (UniqueName: \"kubernetes.io/projected/d507ecac-9761-4886-8acd-daff78a5d360-kube-api-access-s64j4\") pod \"packageserver-d55dfcdfc-7qsc9\" (UID: \"d507ecac-9761-4886-8acd-daff78a5d360\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7qsc9"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.559753 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/f2ba3d51-3014-4233-931b-adaa45c937dd-mountpoint-dir\") pod \"csi-hostpathplugin-9ppnb\" (UID: \"f2ba3d51-3014-4233-931b-adaa45c937dd\") " pod="hostpath-provisioner/csi-hostpathplugin-9ppnb"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.559962 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/f2ba3d51-3014-4233-931b-adaa45c937dd-mountpoint-dir\") pod \"csi-hostpathplugin-9ppnb\" (UID: \"f2ba3d51-3014-4233-931b-adaa45c937dd\") " pod="hostpath-provisioner/csi-hostpathplugin-9ppnb"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.560004 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/1d0ded20-7af7-44b2-9bd8-4a6066faa4d5-profile-collector-cert\") pod \"catalog-operator-68c6474976-ncpvc\" (UID: \"1d0ded20-7af7-44b2-9bd8-4a6066faa4d5\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncpvc"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.560284 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d972c88-073c-4259-bc37-b77d1c0a0bfe-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-2w77p\" (UID: \"9d972c88-073c-4259-bc37-b77d1c0a0bfe\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-2w77p"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.561134 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/a6d0e458-bd0f-402c-8cad-db6eba1e45ff-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-8mjk7\" (UID: \"a6d0e458-bd0f-402c-8cad-db6eba1e45ff\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-8mjk7"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.561344 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6f6a533d-1bcb-409b-994e-e4ec71cffaeb-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-q7g9p\" (UID: \"6f6a533d-1bcb-409b-994e-e4ec71cffaeb\") " pod="openshift-marketplace/marketplace-operator-79b997595-q7g9p"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.561935 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/17c756cd-c7ba-4efa-850d-7a9aff74099d-secret-volume\") pod \"collect-profiles-29323365-465ld\" (UID: \"17c756cd-c7ba-4efa-850d-7a9aff74099d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323365-465ld"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.562136 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-79s2g\" (UniqueName: \"kubernetes.io/projected/093341b6-a75f-4acc-82fe-cddcc303f189-kube-api-access-79s2g\") pod \"machine-config-server-rq6qg\" (UID: \"093341b6-a75f-4acc-82fe-cddcc303f189\") " pod="openshift-machine-config-operator/machine-config-server-rq6qg"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.562240 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/093341b6-a75f-4acc-82fe-cddcc303f189-node-bootstrap-token\") pod \"machine-config-server-rq6qg\" (UID: \"093341b6-a75f-4acc-82fe-cddcc303f189\") " pod="openshift-machine-config-operator/machine-config-server-rq6qg"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.562450 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/17c756cd-c7ba-4efa-850d-7a9aff74099d-config-volume\") pod \"collect-profiles-29323365-465ld\" (UID: \"17c756cd-c7ba-4efa-850d-7a9aff74099d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323365-465ld"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.563043 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/f2ba3d51-3014-4233-931b-adaa45c937dd-socket-dir\") pod \"csi-hostpathplugin-9ppnb\" (UID: \"f2ba3d51-3014-4233-931b-adaa45c937dd\") " pod="hostpath-provisioner/csi-hostpathplugin-9ppnb"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.563065 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9675dc45-ac16-4440-a4b1-2c3cfeff2459-config-volume\") pod \"dns-default-8vjpx\" (UID: \"9675dc45-ac16-4440-a4b1-2c3cfeff2459\") " pod="openshift-dns/dns-default-8vjpx"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.563630 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/9675dc45-ac16-4440-a4b1-2c3cfeff2459-config-volume\") pod \"dns-default-8vjpx\" (UID: \"9675dc45-ac16-4440-a4b1-2c3cfeff2459\") " pod="openshift-dns/dns-default-8vjpx"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.563681 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/f2ba3d51-3014-4233-931b-adaa45c937dd-socket-dir\") pod \"csi-hostpathplugin-9ppnb\" (UID: \"f2ba3d51-3014-4233-931b-adaa45c937dd\") " pod="hostpath-provisioner/csi-hostpathplugin-9ppnb"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.564042 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/0ee74d4b-8751-4089-a0e6-a99132950452-signing-key\") pod \"service-ca-9c57cc56f-9m8m9\" (UID: \"0ee74d4b-8751-4089-a0e6-a99132950452\") " pod="openshift-service-ca/service-ca-9c57cc56f-9m8m9"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.564142 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/a053c888-10fd-4b11-bc05-963d0a7ab8ad-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-rfpcx\" (UID: \"a053c888-10fd-4b11-bc05-963d0a7ab8ad\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rfpcx"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.564211 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d507ecac-9761-4886-8acd-daff78a5d360-webhook-cert\") pod \"packageserver-d55dfcdfc-7qsc9\" (UID: \"d507ecac-9761-4886-8acd-daff78a5d360\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7qsc9"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.564433 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/33db5790-d9e7-4599-b8c7-7578ccb9940a-srv-cert\") pod \"olm-operator-6b444d44fb-js7h8\" (UID: \"33db5790-d9e7-4599-b8c7-7578ccb9940a\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-js7h8"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.564696 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b03dc3cf-caec-4c8a-86e3-db542117eef3-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-8544f\" (UID: \"b03dc3cf-caec-4c8a-86e3-db542117eef3\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-8544f"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.564698 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/17c756cd-c7ba-4efa-850d-7a9aff74099d-config-volume\") pod \"collect-profiles-29323365-465ld\" (UID: \"17c756cd-c7ba-4efa-850d-7a9aff74099d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323365-465ld"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.565979 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/093341b6-a75f-4acc-82fe-cddcc303f189-node-bootstrap-token\") pod \"machine-config-server-rq6qg\" (UID: \"093341b6-a75f-4acc-82fe-cddcc303f189\") " pod="openshift-machine-config-operator/machine-config-server-rq6qg"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.566069 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/1d0ded20-7af7-44b2-9bd8-4a6066faa4d5-profile-collector-cert\") pod \"catalog-operator-68c6474976-ncpvc\" (UID: \"1d0ded20-7af7-44b2-9bd8-4a6066faa4d5\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncpvc"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.566713 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pr7xh\" (UniqueName: \"kubernetes.io/projected/7567048e-a0b0-46e5-b4bf-51180f84b884-kube-api-access-pr7xh\") pod \"router-default-5444994796-zwxct\" (UID: \"7567048e-a0b0-46e5-b4bf-51180f84b884\") " pod="openshift-ingress/router-default-5444994796-zwxct"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.574710 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-r7v8v"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.578162 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/88a3511a-30a6-4aa3-99d9-e65e05aaefc8-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-drj5j\" (UID: \"88a3511a-30a6-4aa3-99d9-e65e05aaefc8\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-drj5j"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.601577 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-drj5j"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.602265 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-llvqw\" (UniqueName: \"kubernetes.io/projected/02091d48-2a3f-4efc-a173-fc17fb2f3f9a-kube-api-access-llvqw\") pod \"multus-admission-controller-857f4d67dd-x4fgp\" (UID: \"02091d48-2a3f-4efc-a173-fc17fb2f3f9a\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-x4fgp"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.616208 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-r6pqv"]
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.616723 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b741b1e1-2c96-4a89-82eb-0a4106dfc6bb-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-8wv52\" (UID: \"b741b1e1-2c96-4a89-82eb-0a4106dfc6bb\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8wv52"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.635773 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n97rr\" (UniqueName: \"kubernetes.io/projected/eb03ecb8-df6d-404e-b248-90ca5372ecf9-kube-api-access-n97rr\") pod \"openshift-controller-manager-operator-756b6f6bc6-xknld\" (UID: \"eb03ecb8-df6d-404e-b248-90ca5372ecf9\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-xknld"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.638705 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-xknld"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.640387 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-cm245"]
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.655205 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mktxk"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.655715 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dft9f\" (UniqueName: \"kubernetes.io/projected/700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe-kube-api-access-dft9f\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.662776 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-zwxct"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.663786 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 02 10:55:10 crc kubenswrapper[4783]: E1002 10:55:10.663902 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:11.16387763 +0000 UTC m=+144.480071901 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.663946 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99"
Oct 02 10:55:10 crc kubenswrapper[4783]: E1002 10:55:10.664468 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:11.164456535 +0000 UTC m=+144.480650856 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.670643 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8wv52"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.677062 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe-bound-sa-token\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.698055 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/1b89cc5b-178f-4c08-b588-979dc2393bae-bound-sa-token\") pod \"ingress-operator-5b745b69d9-xgcwj\" (UID: \"1b89cc5b-178f-4c08-b588-979dc2393bae\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xgcwj"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.701894 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-x4fgp"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.713535 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dp448\" (UniqueName: \"kubernetes.io/projected/b9581e6c-3a17-4a11-bf2e-c24f90ececca-kube-api-access-dp448\") pod \"cluster-samples-operator-665b6dd947-lmvhj\" (UID: \"b9581e6c-3a17-4a11-bf2e-c24f90ececca\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-lmvhj"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.738385 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zt5xd\" (UniqueName: \"kubernetes.io/projected/0c6fa8cc-e2f2-46a1-995d-96a9b76f01fb-kube-api-access-zt5xd\") pod \"authentication-operator-69f744f599-ql8ns\" (UID: \"0c6fa8cc-e2f2-46a1-995d-96a9b76f01fb\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-ql8ns"
Oct 02 10:55:10 crc kubenswrapper[4783]: W1002 10:55:10.741139 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71af7827_a6d5_4a87_9839_996ab528213d.slice/crio-a8ade701b5483d5629977a5e46dd9ece461855c3565d3ca935c5be990d086102 WatchSource:0}: Error finding container a8ade701b5483d5629977a5e46dd9ece461855c3565d3ca935c5be990d086102: Status 404 returned error can't find the container with id a8ade701b5483d5629977a5e46dd9ece461855c3565d3ca935c5be990d086102
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.765222 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 02 10:55:10 crc kubenswrapper[4783]: E1002 10:55:10.765499 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:11.265451681 +0000 UTC m=+144.581645942 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.765829 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99"
Oct 02 10:55:10 crc kubenswrapper[4783]: E1002 10:55:10.766801 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:11.266725834 +0000 UTC m=+144.582920115 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.777489 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bq79g\" (UniqueName: \"kubernetes.io/projected/9675dc45-ac16-4440-a4b1-2c3cfeff2459-kube-api-access-bq79g\") pod \"dns-default-8vjpx\" (UID: \"9675dc45-ac16-4440-a4b1-2c3cfeff2459\") " pod="openshift-dns/dns-default-8vjpx"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.782274 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-ql8ns"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.799920 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mvr7d\" (UniqueName: \"kubernetes.io/projected/17c756cd-c7ba-4efa-850d-7a9aff74099d-kube-api-access-mvr7d\") pod \"collect-profiles-29323365-465ld\" (UID: \"17c756cd-c7ba-4efa-850d-7a9aff74099d\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323365-465ld"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.814957 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5fnmb\" (UniqueName: \"kubernetes.io/projected/6f6a533d-1bcb-409b-994e-e4ec71cffaeb-kube-api-access-5fnmb\") pod \"marketplace-operator-79b997595-q7g9p\" (UID: \"6f6a533d-1bcb-409b-994e-e4ec71cffaeb\") " pod="openshift-marketplace/marketplace-operator-79b997595-q7g9p"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.839366 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wgcc7\" (UniqueName: \"kubernetes.io/projected/6c1efff0-350e-4626-b854-d818e5b5f7f7-kube-api-access-wgcc7\") pod \"service-ca-operator-777779d784-lqq4w\" (UID: \"6c1efff0-350e-4626-b854-d818e5b5f7f7\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-lqq4w"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.849941 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-8vjpx"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.860440 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fd44s\" (UniqueName: \"kubernetes.io/projected/1577f6a5-596e-445c-995d-ffe75fe194b0-kube-api-access-fd44s\") pod \"ingress-canary-s7hdh\" (UID: \"1577f6a5-596e-445c-995d-ffe75fe194b0\") " pod="openshift-ingress-canary/ingress-canary-s7hdh"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.866702 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-lmvhj"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.867124 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 02 10:55:10 crc kubenswrapper[4783]: E1002 10:55:10.867214 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:11.367193646 +0000 UTC m=+144.683387907 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.867497 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99"
Oct 02 10:55:10 crc kubenswrapper[4783]: E1002 10:55:10.867849 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:11.367836893 +0000 UTC m=+144.684031154 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.879539 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fsgzp\" (UniqueName: \"kubernetes.io/projected/2542c48f-ba7c-4077-9aaa-63fb5ddb0c1a-kube-api-access-fsgzp\") pod \"migrator-59844c95c7-t4hbs\" (UID: \"2542c48f-ba7c-4077-9aaa-63fb5ddb0c1a\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-t4hbs"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.904932 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/b03dc3cf-caec-4c8a-86e3-db542117eef3-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-8544f\" (UID: \"b03dc3cf-caec-4c8a-86e3-db542117eef3\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-8544f"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.925241 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4cfnv\" (UniqueName: \"kubernetes.io/projected/a053c888-10fd-4b11-bc05-963d0a7ab8ad-kube-api-access-4cfnv\") pod \"package-server-manager-789f6589d5-rfpcx\" (UID: \"a053c888-10fd-4b11-bc05-963d0a7ab8ad\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rfpcx"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.940513 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z6v54\" (UniqueName: \"kubernetes.io/projected/daa53316-d347-4aaf-b33d-9b6f63757502-kube-api-access-z6v54\") pod \"machine-config-controller-84d6567774-6rrvt\" (UID: \"daa53316-d347-4aaf-b33d-9b6f63757502\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6rrvt"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.947365 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xgcwj"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.957910 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9frm2\" (UniqueName: \"kubernetes.io/projected/1d0ded20-7af7-44b2-9bd8-4a6066faa4d5-kube-api-access-9frm2\") pod \"catalog-operator-68c6474976-ncpvc\" (UID: \"1d0ded20-7af7-44b2-9bd8-4a6066faa4d5\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncpvc"
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.968268 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 02 10:55:10 crc kubenswrapper[4783]: E1002 10:55:10.968473 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:11.468445489 +0000 UTC m=+144.784639750 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.968735 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99"
Oct 02 10:55:10 crc kubenswrapper[4783]: E1002 10:55:10.969075 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:11.469059275 +0000 UTC m=+144.785253536 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
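Each retry entry pairs a wall-clock deadline with a monotonic offset (m=+...) measured from kubelet start, and the deadline is always the failure time plus the fixed durationBeforeRetry. The tiny Go example below reproduces that arithmetic for the 10:55:10.765499 failure above; the failure's own m=+ value is an assumption derived by subtracting the 500ms backoff from the logged deadline.

package main

import "fmt"

func main() {
	// The failure above schedules its retry for 10:55:11.265451681,
	// logged as m=+144.581645942; working backwards, the failure sits
	// at m=+144.081645942 on kubelet's monotonic clock (assumed).
	const failedAt = 144.081645942 // seconds since kubelet start (m=+...)
	const backoff = 0.5            // durationBeforeRetry 500ms
	fmt.Printf("no retries permitted until m=+%.9f\n", failedAt+backoff)
}

Running it prints m=+144.581645942, matching the logged deadline exactly.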
Oct 02 10:55:10 crc kubenswrapper[4783]: I1002 10:55:10.976754 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kz4wl\" (UniqueName: \"kubernetes.io/projected/f2ba3d51-3014-4233-931b-adaa45c937dd-kube-api-access-kz4wl\") pod \"csi-hostpathplugin-9ppnb\" (UID: \"f2ba3d51-3014-4233-931b-adaa45c937dd\") " pod="hostpath-provisioner/csi-hostpathplugin-9ppnb"
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.006194 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dh5mm\" (UniqueName: \"kubernetes.io/projected/0ee74d4b-8751-4089-a0e6-a99132950452-kube-api-access-dh5mm\") pod \"service-ca-9c57cc56f-9m8m9\" (UID: \"0ee74d4b-8751-4089-a0e6-a99132950452\") " pod="openshift-service-ca/service-ca-9c57cc56f-9m8m9"
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.006974 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-t4hbs"
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.014862 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dsxwv\" (UniqueName: \"kubernetes.io/projected/9d972c88-073c-4259-bc37-b77d1c0a0bfe-kube-api-access-dsxwv\") pod \"kube-storage-version-migrator-operator-b67b599dd-2w77p\" (UID: \"9d972c88-073c-4259-bc37-b77d1c0a0bfe\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-2w77p"
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.014998 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6rrvt"
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.029686 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncpvc"
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.033740 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vbvxw\" (UniqueName: \"kubernetes.io/projected/a6d0e458-bd0f-402c-8cad-db6eba1e45ff-kube-api-access-vbvxw\") pod \"control-plane-machine-set-operator-78cbb6b69f-8mjk7\" (UID: \"a6d0e458-bd0f-402c-8cad-db6eba1e45ff\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-8mjk7"
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.045314 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-8544f"
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.051783 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-8mjk7"
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.054165 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s64j4\" (UniqueName: \"kubernetes.io/projected/d507ecac-9761-4886-8acd-daff78a5d360-kube-api-access-s64j4\") pod \"packageserver-d55dfcdfc-7qsc9\" (UID: \"d507ecac-9761-4886-8acd-daff78a5d360\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7qsc9"
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.061646 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29323365-465ld"
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.067823 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rfpcx"
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.069121 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 02 10:55:11 crc kubenswrapper[4783]: E1002 10:55:11.069353 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:11.569331361 +0000 UTC m=+144.885525622 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.069533 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99"
Oct 02 10:55:11 crc kubenswrapper[4783]: E1002 10:55:11.069909 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:11.569899896 +0000 UTC m=+144.886094167 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.080341 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-2w77p"
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.082368 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-lqq4w"
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.085506 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nb8sq\" (UniqueName: \"kubernetes.io/projected/33db5790-d9e7-4599-b8c7-7578ccb9940a-kube-api-access-nb8sq\") pod \"olm-operator-6b444d44fb-js7h8\" (UID: \"33db5790-d9e7-4599-b8c7-7578ccb9940a\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-js7h8"
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.090233 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-q7g9p"
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.099245 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-79s2g\" (UniqueName: \"kubernetes.io/projected/093341b6-a75f-4acc-82fe-cddcc303f189-kube-api-access-79s2g\") pod \"machine-config-server-rq6qg\" (UID: \"093341b6-a75f-4acc-82fe-cddcc303f189\") " pod="openshift-machine-config-operator/machine-config-server-rq6qg"
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.104787 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-9m8m9"
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.111845 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7qsc9"
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.114269 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-9hj2c"]
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.131967 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-s7hdh"
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.144008 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-9ppnb"
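By this point the "No sandbox for pod can be found. Need to start a new one" message has fired for a couple of dozen pods; a quick tally per pod makes the startup wave easy to see. A small, self-contained Go parser (illustrative, not part of kubelet) that reads a kubelet.log stream on stdin follows; the pod="..." field format is taken from the entries above.

package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

func main() {
	// Matches the sandbox-miss message and captures the pod= field.
	re := regexp.MustCompile(`No sandbox for pod can be found.*?pod="([^"]+)"`)
	counts := map[string]int{}
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024) // journald entries can be long
	for sc.Scan() {
		if m := re.FindStringSubmatch(sc.Text()); m != nil {
			counts[m[1]]++
		}
	}
	for pod, n := range counts {
		fmt.Printf("%5d  %s\n", n, pod)
	}
}

Usage would be something like: go run tally.go < kubelet.log. On this section each pod should appear exactly once, confirming these are first-time sandbox creations rather than crash loops.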
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.162582 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-x9scv"]
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.172332 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 02 10:55:11 crc kubenswrapper[4783]: E1002 10:55:11.172925 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:11.672896953 +0000 UTC m=+144.989091214 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.173201 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99"
Oct 02 10:55:11 crc kubenswrapper[4783]: E1002 10:55:11.173626 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:11.673619722 +0000 UTC m=+144.989813983 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.274929 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 02 10:55:11 crc kubenswrapper[4783]: E1002 10:55:11.275610 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:11.775595764 +0000 UTC m=+145.091790025 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.334453 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mktxk"]
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.337624 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-js7h8"
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.344591 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-drj5j"]
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.356680 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zbtp8" event={"ID":"a2328b20-92ab-42c6-a089-89ec4e1c4ffb","Type":"ContainerStarted","Data":"ef34a0462c5a3f5ab6c67a31c1075f20d109318bebac83abc00c6e8eade7f48d"}
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.357728 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-9d7f5" event={"ID":"20842d7f-97fb-42e7-80ff-f51a26a55970","Type":"ContainerStarted","Data":"546d4ff954a7787c2b2ea703a0eb580f3112a8f67e5a7ca3b7dae1f68a9fb2d1"}
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.358238 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-cm245" event={"ID":"e435d9c9-4a33-4c0d-bb2b-84aa5e988124","Type":"ContainerStarted","Data":"e36b619c7ab04a6a3aa35fac48ce7d48be58d4728f94dbf57f1833c1ef495498"}
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.359253 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-hw769" event={"ID":"a1ba5a71-7a04-4446-8459-1748799af4db","Type":"ContainerStarted","Data":"0e9c250c11b8896a249ea2fda0abf7afe87f2d9c3c6b361ef47241c7e4f04df9"}
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.360326 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" event={"ID":"01de2d55-d330-461b-b801-0bfe3078baab","Type":"ContainerStarted","Data":"c1ddb6806afb1fc7ddeeead147df1e0c7be6ab93e41c4abdcd90556a635c7b67"}
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.361494 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp"
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.362669 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-x2ljx" event={"ID":"5b90e90f-8fb3-440c-b0d3-20f9582764ea","Type":"ContainerStarted","Data":"abfb722305934d6a1575aa547ce3a20e834f4e5810e737066adfa9353a04c3fa"}
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.380647 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-d2npj"]
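The PLEG events above serialize their payload as {"ID":...,"Type":...,"Data":...}, where ID is the pod UID and Data is the 64-hex container or sandbox ID. A minimal Go decoder for one such payload follows; the struct is an assumption matching the printed JSON, not kubelet's internal event type.

package main

import (
	"encoding/json"
	"fmt"
)

// plegEvent mirrors the JSON shape printed in the log lines above
// (field names assumed from the output, not taken from kubelet source).
type plegEvent struct {
	ID   string
	Type string
	Data string
}

func main() {
	raw := `{"ID":"a2328b20-92ab-42c6-a089-89ec4e1c4ffb","Type":"ContainerStarted","Data":"ef34a0462c5a3f5ab6c67a31c1075f20d109318bebac83abc00c6e8eade7f48d"}`
	var ev plegEvent
	if err := json.Unmarshal([]byte(raw), &ev); err != nil {
		panic(err)
	}
	fmt.Printf("pod %s: %s %s\n", ev.ID, ev.Type, ev.Data)
}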
pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-d2npj"] Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.383531 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:11 crc kubenswrapper[4783]: E1002 10:55:11.384157 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:11.884145328 +0000 UTC m=+145.200339589 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.386707 4783 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-4cnvp container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.18:6443/healthz\": dial tcp 10.217.0.18:6443: connect: connection refused" start-of-body= Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.386748 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" podUID="01de2d55-d330-461b-b801-0bfe3078baab" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.18:6443/healthz\": dial tcp 10.217.0.18:6443: connect: connection refused" Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.386972 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-r6pqv" event={"ID":"71af7827-a6d5-4a87-9839-996ab528213d","Type":"ContainerStarted","Data":"a8ade701b5483d5629977a5e46dd9ece461855c3565d3ca935c5be990d086102"} Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.388549 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-vjcp4" event={"ID":"ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7","Type":"ContainerStarted","Data":"846279bfdecc023d4c0572ac40f93de2279d2e05bea57188bbb07e20b97c9f18"} Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.398044 4783 util.go:30] "No sandbox for pod can be found. 
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.401031 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-9hxcf"]
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.406255 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-xlxdx" event={"ID":"50389666-bf4f-4442-a4cd-f3609994ce1b","Type":"ContainerStarted","Data":"e85daa669bcdc6e32bc8c4432a0829e14c9f9ffdb436d9fe7b631d2f2676dc51"}
Oct 02 10:55:11 crc kubenswrapper[4783]: W1002 10:55:11.447539 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod12e7e4aa_75cb_41c1_8d03_8eea90096e8c.slice/crio-75a62756c5badfd10e0f7319d01790d4dac90a3a178a7e522d60b437935003fb WatchSource:0}: Error finding container 75a62756c5badfd10e0f7319d01790d4dac90a3a178a7e522d60b437935003fb: Status 404 returned error can't find the container with id 75a62756c5badfd10e0f7319d01790d4dac90a3a178a7e522d60b437935003fb
Oct 02 10:55:11 crc kubenswrapper[4783]: W1002 10:55:11.476114 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod73cf839f_ec02_4db7_b640_e1c0783fafc9.slice/crio-09a827fac177695421ff8dc96111e92653301020290ee7c383590f5f673973aa WatchSource:0}: Error finding container 09a827fac177695421ff8dc96111e92653301020290ee7c383590f5f673973aa: Status 404 returned error can't find the container with id 09a827fac177695421ff8dc96111e92653301020290ee7c383590f5f673973aa
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.484230 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.484461 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-x4fgp"]
Oct 02 10:55:11 crc kubenswrapper[4783]: E1002 10:55:11.484635 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:11.984512687 +0000 UTC m=+145.300706948 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.486558 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-dzxd5"]
Oct 02 10:55:11 crc kubenswrapper[4783]: W1002 10:55:11.553331 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6670b373_8502_4db3_9f27_461eca66f043.slice/crio-a581433c88ab8dcbd2532e47fdab05180e6bcb00410134c788896d8a156c717f WatchSource:0}: Error finding container a581433c88ab8dcbd2532e47fdab05180e6bcb00410134c788896d8a156c717f: Status 404 returned error can't find the container with id a581433c88ab8dcbd2532e47fdab05180e6bcb00410134c788896d8a156c717f
Oct 02 10:55:11 crc kubenswrapper[4783]: W1002 10:55:11.568176 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfeedb992_610e_4ceb_84f1_7d5a005d7826.slice/crio-dea1b6176e8484b2b272e7652393912bca7a8421fd2801a5a39ff7fe32fcc40d WatchSource:0}: Error finding container dea1b6176e8484b2b272e7652393912bca7a8421fd2801a5a39ff7fe32fcc40d: Status 404 returned error can't find the container with id dea1b6176e8484b2b272e7652393912bca7a8421fd2801a5a39ff7fe32fcc40d
Oct 02 10:55:11 crc kubenswrapper[4783]: W1002 10:55:11.572572 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod02091d48_2a3f_4efc_a173_fc17fb2f3f9a.slice/crio-8f6afc4958b14ab4fd68ffc9e15f9c5224342a87d95678cc7178c9fcf6b50ff5 WatchSource:0}: Error finding container 8f6afc4958b14ab4fd68ffc9e15f9c5224342a87d95678cc7178c9fcf6b50ff5: Status 404 returned error can't find the container with id 8f6afc4958b14ab4fd68ffc9e15f9c5224342a87d95678cc7178c9fcf6b50ff5
Oct 02 10:55:11 crc kubenswrapper[4783]: E1002 10:55:11.585719 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:12.085707438 +0000 UTC m=+145.401901699 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.585786 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99"
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.674809 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-r7v8v"]
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.686564 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 02 10:55:11 crc kubenswrapper[4783]: E1002 10:55:11.686846 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:12.186801926 +0000 UTC m=+145.502996187 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.686983 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99"
Oct 02 10:55:11 crc kubenswrapper[4783]: E1002 10:55:11.687290 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:12.187282279 +0000 UTC m=+145.503476540 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.728936 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8wv52"]
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.787914 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 02 10:55:11 crc kubenswrapper[4783]: E1002 10:55:11.788289 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:12.288271734 +0000 UTC m=+145.604465995 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.788368 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-xknld"]
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.890682 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99"
Oct 02 10:55:11 crc kubenswrapper[4783]: E1002 10:55:11.891001 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:12.390988435 +0000 UTC m=+145.707182696 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.899730 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-ql8ns"]
Oct 02 10:55:11 crc kubenswrapper[4783]: I1002 10:55:11.991261 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 02 10:55:11 crc kubenswrapper[4783]: E1002 10:55:11.992068 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:12.492051742 +0000 UTC m=+145.808246003 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 02 10:55:12 crc kubenswrapper[4783]: W1002 10:55:12.045770 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podeb03ecb8_df6d_404e_b248_90ca5372ecf9.slice/crio-3dd03d13a06f40531dd8a8d8bd82acb7c5ea8c09cb90f20b8a5d4106b17e24ba WatchSource:0}: Error finding container 3dd03d13a06f40531dd8a8d8bd82acb7c5ea8c09cb90f20b8a5d4106b17e24ba: Status 404 returned error can't find the container with id 3dd03d13a06f40531dd8a8d8bd82acb7c5ea8c09cb90f20b8a5d4106b17e24ba
Oct 02 10:55:12 crc kubenswrapper[4783]: W1002 10:55:12.059299 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0c6fa8cc_e2f2_46a1_995d_96a9b76f01fb.slice/crio-a0825df5bc63aeec21bb80163593428e21598f4bcf3fb98531e16c324b720624 WatchSource:0}: Error finding container a0825df5bc63aeec21bb80163593428e21598f4bcf3fb98531e16c324b720624: Status 404 returned error can't find the container with id a0825df5bc63aeec21bb80163593428e21598f4bcf3fb98531e16c324b720624
Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.092922 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99"
Oct 02 10:55:12 crc kubenswrapper[4783]: E1002 10:55:12.093621 4783 nestedpendingoperations.go:348] Operation for
"{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:12.593609173 +0000 UTC m=+145.909803434 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.193937 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:55:12 crc kubenswrapper[4783]: E1002 10:55:12.194573 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:12.694559277 +0000 UTC m=+146.010753538 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.262596 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-lmvhj"] Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.290836 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-t4hbs"] Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.296583 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:12 crc kubenswrapper[4783]: E1002 10:55:12.296936 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:12.796922839 +0000 UTC m=+146.113117100 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.308804 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-8vjpx"] Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.360095 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncpvc"] Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.364188 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-8544f"] Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.375930 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-6rrvt"] Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.378570 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-xgcwj"] Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.386805 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-q7g9p"] Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.398537 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:55:12 crc kubenswrapper[4783]: E1002 10:55:12.398975 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:12.898957872 +0000 UTC m=+146.215152143 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:12 crc kubenswrapper[4783]: W1002 10:55:12.416582 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9675dc45_ac16_4440_a4b1_2c3cfeff2459.slice/crio-c4f37fd80efb7c540e241700295b94c06d9ebc2f110c7a18cedb6973dd49cd21 WatchSource:0}: Error finding container c4f37fd80efb7c540e241700295b94c06d9ebc2f110c7a18cedb6973dd49cd21: Status 404 returned error can't find the container with id c4f37fd80efb7c540e241700295b94c06d9ebc2f110c7a18cedb6973dd49cd21 Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.500058 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:12 crc kubenswrapper[4783]: E1002 10:55:12.500942 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:13.000930643 +0000 UTC m=+146.317124894 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.514617 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" podStartSLOduration=123.514601832 podStartE2EDuration="2m3.514601832s" podCreationTimestamp="2025-10-02 10:53:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:55:12.513321279 +0000 UTC m=+145.829515530" watchObservedRunningTime="2025-10-02 10:55:12.514601832 +0000 UTC m=+145.830796093" Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.528811 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-xknld" event={"ID":"eb03ecb8-df6d-404e-b248-90ca5372ecf9","Type":"ContainerStarted","Data":"3dd03d13a06f40531dd8a8d8bd82acb7c5ea8c09cb90f20b8a5d4106b17e24ba"} Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.530254 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-8mjk7"] Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.536419 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-zwxct" event={"ID":"7567048e-a0b0-46e5-b4bf-51180f84b884","Type":"ContainerStarted","Data":"5b0e41e6dd1322a033bdcf75cce117cd69c01efc93fd7f53c8527b07333fc32b"} Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.539209 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-r6pqv" event={"ID":"71af7827-a6d5-4a87-9839-996ab528213d","Type":"ContainerStarted","Data":"616dca96f06b63f9999f042cfd4448105312305129b42d27fe7553fbdad269c2"} Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.541190 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-r6pqv" Oct 02 10:55:12 crc kubenswrapper[4783]: W1002 10:55:12.544438 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddaa53316_d347_4aaf_b33d_9b6f63757502.slice/crio-29b7f7c96b57a20dc773d12ce2911ee46b53c26e75af64c34577619ff7eafea7 WatchSource:0}: Error finding container 29b7f7c96b57a20dc773d12ce2911ee46b53c26e75af64c34577619ff7eafea7: Status 404 returned error can't find the container with id 29b7f7c96b57a20dc773d12ce2911ee46b53c26e75af64c34577619ff7eafea7 Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.546569 4783 patch_prober.go:28] interesting pod/downloads-7954f5f757-r6pqv container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.546612 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-r6pqv" podUID="71af7827-a6d5-4a87-9839-996ab528213d" 
containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.551130 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zbtp8" event={"ID":"a2328b20-92ab-42c6-a089-89ec4e1c4ffb","Type":"ContainerStarted","Data":"51eea399ea15946dc11a4639dd84c3698ad74074a86748ee1d6d3824bbfa0318"} Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.556574 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zbtp8" Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.561919 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-9hj2c" event={"ID":"032450eb-6d0d-4cc2-90ee-c1ae7228f735","Type":"ContainerStarted","Data":"6d82a8a432cca36a149db772267fb8f284d64c26b20395abff00d9a0693cdb2d"} Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.567479 4783 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-zbtp8 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" start-of-body= Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.567545 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zbtp8" podUID="a2328b20-92ab-42c6-a089-89ec4e1c4ffb" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.570769 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-9hxcf" event={"ID":"0ec0d7d6-9f6a-43cb-984b-4e162c07da70","Type":"ContainerStarted","Data":"f4fc7d6b320460b5f1a85eb848fff14cdebb2c8e8d02cf6a4d3ecce71d6e0b06"} Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.572393 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mktxk" event={"ID":"73cf839f-ec02-4db7-b640-e1c0783fafc9","Type":"ContainerStarted","Data":"09a827fac177695421ff8dc96111e92653301020290ee7c383590f5f673973aa"} Oct 02 10:55:12 crc kubenswrapper[4783]: W1002 10:55:12.584716 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1d0ded20_7af7_44b2_9bd8_4a6066faa4d5.slice/crio-6cced617806a43a7ff0a1347bb91f01ff152e2a6424714089c2dbbb68ae9b2ba WatchSource:0}: Error finding container 6cced617806a43a7ff0a1347bb91f01ff152e2a6424714089c2dbbb68ae9b2ba: Status 404 returned error can't find the container with id 6cced617806a43a7ff0a1347bb91f01ff152e2a6424714089c2dbbb68ae9b2ba Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.594045 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-r6pqv" podStartSLOduration=123.594028051 podStartE2EDuration="2m3.594028051s" podCreationTimestamp="2025-10-02 10:53:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:55:12.55636739 +0000 UTC m=+145.872561651" 
watchObservedRunningTime="2025-10-02 10:55:12.594028051 +0000 UTC m=+145.910222312" Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.584952 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-rq6qg" event={"ID":"093341b6-a75f-4acc-82fe-cddcc303f189","Type":"ContainerStarted","Data":"8d9d285028c76ee5577b68359c0b5a406b08ba3082affbed636986d57df40b2f"} Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.601545 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zbtp8" podStartSLOduration=122.601525228 podStartE2EDuration="2m2.601525228s" podCreationTimestamp="2025-10-02 10:53:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:55:12.594114593 +0000 UTC m=+145.910308854" watchObservedRunningTime="2025-10-02 10:55:12.601525228 +0000 UTC m=+145.917719489" Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.602764 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:55:12 crc kubenswrapper[4783]: E1002 10:55:12.603115 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:13.103100749 +0000 UTC m=+146.419295010 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:12 crc kubenswrapper[4783]: W1002 10:55:12.615639 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6f6a533d_1bcb_409b_994e_e4ec71cffaeb.slice/crio-090fe25ee9b0bc21c0db2a68e62ab38364dc6c136e805531fc7552de70c50349 WatchSource:0}: Error finding container 090fe25ee9b0bc21c0db2a68e62ab38364dc6c136e805531fc7552de70c50349: Status 404 returned error can't find the container with id 090fe25ee9b0bc21c0db2a68e62ab38364dc6c136e805531fc7552de70c50349 Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.664978 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-9m8m9"] Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.674171 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-drj5j" event={"ID":"88a3511a-30a6-4aa3-99d9-e65e05aaefc8","Type":"ContainerStarted","Data":"a4f23e839ab8bd6743a5e288655bb53df8de149f5d31e6b5e2161b5c772e1593"} Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.676945 4783 generic.go:334] "Generic (PLEG): container finished" podID="a1ba5a71-7a04-4446-8459-1748799af4db" containerID="0e9c250c11b8896a249ea2fda0abf7afe87f2d9c3c6b361ef47241c7e4f04df9" exitCode=0 Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.677015 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-hw769" event={"ID":"a1ba5a71-7a04-4446-8459-1748799af4db","Type":"ContainerDied","Data":"0e9c250c11b8896a249ea2fda0abf7afe87f2d9c3c6b361ef47241c7e4f04df9"} Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.683390 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29323365-465ld"] Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.686182 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-js7h8"] Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.707308 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:12 crc kubenswrapper[4783]: E1002 10:55:12.707615 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:13.207603277 +0000 UTC m=+146.523797528 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.715182 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8wv52" event={"ID":"b741b1e1-2c96-4a89-82eb-0a4106dfc6bb","Type":"ContainerStarted","Data":"71ab77cf2269b0a5776e65c0f959b70a1d80a5e94bd448ac2621c5913e58a325"} Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.731973 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-r7v8v" event={"ID":"baa575d2-ecb0-4e64-b0fe-361fec92e555","Type":"ContainerStarted","Data":"7cbdeeaf72ab82f39569ed87940b3802dd5752183ec24657a0b6172f54641cee"} Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.748521 4783 generic.go:334] "Generic (PLEG): container finished" podID="e435d9c9-4a33-4c0d-bb2b-84aa5e988124" containerID="88459e235e813c2c883af47cd7def58b410d85d88b7cd230b25b5a2bd281ebfb" exitCode=0 Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.748661 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-cm245" event={"ID":"e435d9c9-4a33-4c0d-bb2b-84aa5e988124","Type":"ContainerDied","Data":"88459e235e813c2c883af47cd7def58b410d85d88b7cd230b25b5a2bd281ebfb"} Oct 02 10:55:12 crc kubenswrapper[4783]: W1002 10:55:12.760129 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0ee74d4b_8751_4089_a0e6_a99132950452.slice/crio-ba198ef00a691412055dc0eccd0355975075b6168dbde92977b82dd91b944445 WatchSource:0}: Error finding container ba198ef00a691412055dc0eccd0355975075b6168dbde92977b82dd91b944445: Status 404 returned error can't find the container with id ba198ef00a691412055dc0eccd0355975075b6168dbde92977b82dd91b944445 Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.782263 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-dzxd5" event={"ID":"feedb992-610e-4ceb-84f1-7d5a005d7826","Type":"ContainerStarted","Data":"dea1b6176e8484b2b272e7652393912bca7a8421fd2801a5a39ff7fe32fcc40d"} Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.783818 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-lqq4w"] Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.787259 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-9ppnb"] Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.798016 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-s7hdh"] Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.808159 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " 
Oct 02 10:55:12 crc kubenswrapper[4783]: E1002 10:55:12.808295 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:13.308270974 +0000 UTC m=+146.624465235 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.808496 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99"
Oct 02 10:55:12 crc kubenswrapper[4783]: E1002 10:55:12.809010 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:13.308994613 +0000 UTC m=+146.625188874 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.812632 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-x4fgp" event={"ID":"02091d48-2a3f-4efc-a173-fc17fb2f3f9a","Type":"ContainerStarted","Data":"8f6afc4958b14ab4fd68ffc9e15f9c5224342a87d95678cc7178c9fcf6b50ff5"}
Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.830697 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-x9scv" event={"ID":"12e7e4aa-75cb-41c1-8d03-8eea90096e8c","Type":"ContainerStarted","Data":"75a62756c5badfd10e0f7319d01790d4dac90a3a178a7e522d60b437935003fb"}
Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.831299 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-x9scv"
Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.832847 4783 patch_prober.go:28] interesting pod/console-operator-58897d9998-x9scv container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.24:8443/readyz\": dial tcp 10.217.0.24:8443: connect: connection refused" start-of-body=
Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.832905 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-x9scv" podUID="12e7e4aa-75cb-41c1-8d03-8eea90096e8c" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.24:8443/readyz\": dial tcp 10.217.0.24:8443: connect: connection refused"
Oct 02 10:55:12 crc kubenswrapper[4783]: W1002 10:55:12.838538 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6c1efff0_350e_4626_b854_d818e5b5f7f7.slice/crio-5dc38afe8894eafb389616b5fa907c1fbb17bdbe2af7ea49356695fec7629211 WatchSource:0}: Error finding container 5dc38afe8894eafb389616b5fa907c1fbb17bdbe2af7ea49356695fec7629211: Status 404 returned error can't find the container with id 5dc38afe8894eafb389616b5fa907c1fbb17bdbe2af7ea49356695fec7629211
Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.850681 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-x9scv" podStartSLOduration=123.850660339 podStartE2EDuration="2m3.850660339s" podCreationTimestamp="2025-10-02 10:53:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:55:12.843712006 +0000 UTC m=+146.159906267" watchObservedRunningTime="2025-10-02 10:55:12.850660339 +0000 UTC m=+146.166854600"
Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.852212 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-d2npj" event={"ID":"6670b373-8502-4db3-9f27-461eca66f043","Type":"ContainerStarted","Data":"a581433c88ab8dcbd2532e47fdab05180e6bcb00410134c788896d8a156c717f"}
Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.865936 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-vjcp4" event={"ID":"ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7","Type":"ContainerStarted","Data":"d82b0bd3625950f23ddacb09922a3528873a7dfb32c61f5d56e069a8c98ca975"}
Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.880600 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-ql8ns" event={"ID":"0c6fa8cc-e2f2-46a1-995d-96a9b76f01fb","Type":"ContainerStarted","Data":"a0825df5bc63aeec21bb80163593428e21598f4bcf3fb98531e16c324b720624"}
Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.881134 4783 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-4cnvp container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.18:6443/healthz\": dial tcp 10.217.0.18:6443: connect: connection refused" start-of-body=
Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.881164 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" podUID="01de2d55-d330-461b-b801-0bfe3078baab" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.18:6443/healthz\": dial tcp 10.217.0.18:6443: connect: connection refused"
Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.897646 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-2w77p"]
Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.907301 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7qsc9"]
Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.907664 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-vjcp4" podStartSLOduration=123.907644477 podStartE2EDuration="2m3.907644477s" podCreationTimestamp="2025-10-02 10:53:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:55:12.893898446 +0000 UTC m=+146.210092707" watchObservedRunningTime="2025-10-02 10:55:12.907644477 +0000 UTC m=+146.223838738"
Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.909996 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 02 10:55:12 crc kubenswrapper[4783]: E1002 10:55:12.910603 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:13.410589685 +0000 UTC m=+146.726783946 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.920225 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-9d7f5" podStartSLOduration=123.920204047 podStartE2EDuration="2m3.920204047s" podCreationTimestamp="2025-10-02 10:53:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:55:12.909205678 +0000 UTC m=+146.225399939" watchObservedRunningTime="2025-10-02 10:55:12.920204047 +0000 UTC m=+146.236398308"
Oct 02 10:55:12 crc kubenswrapper[4783]: W1002 10:55:12.942003 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd507ecac_9761_4886_8acd_daff78a5d360.slice/crio-6f5718cfed7894840ac294bd4217d3e9e7200783fb8e563990626a77c999339b WatchSource:0}: Error finding container 6f5718cfed7894840ac294bd4217d3e9e7200783fb8e563990626a77c999339b: Status 404 returned error can't find the container with id 6f5718cfed7894840ac294bd4217d3e9e7200783fb8e563990626a77c999339b
Oct 02 10:55:12 crc kubenswrapper[4783]: W1002 10:55:12.960012 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d972c88_073c_4259_bc37_b77d1c0a0bfe.slice/crio-74a87ffb72b98888324de32c8a3dc2346a4d9f6aeddd96db62df3bae1f64ef14 WatchSource:0}: Error finding container 74a87ffb72b98888324de32c8a3dc2346a4d9f6aeddd96db62df3bae1f64ef14: Status 404 returned error can't find the container with id 74a87ffb72b98888324de32c8a3dc2346a4d9f6aeddd96db62df3bae1f64ef14
Oct 02 10:55:12 crc kubenswrapper[4783]: I1002 10:55:12.974250 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rfpcx"]
Oct 02 10:55:12 crc kubenswrapper[4783]: W1002 10:55:12.991794 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda053c888_10fd_4b11_bc05_963d0a7ab8ad.slice/crio-fb1f9d19b9b216a2132346e16ae94a666418b8ff4aa8f2e93d4d5502fd9c4543 WatchSource:0}: Error finding container fb1f9d19b9b216a2132346e16ae94a666418b8ff4aa8f2e93d4d5502fd9c4543: Status 404 returned error can't find the container with id fb1f9d19b9b216a2132346e16ae94a666418b8ff4aa8f2e93d4d5502fd9c4543
Oct 02 10:55:13 crc kubenswrapper[4783]: I1002 10:55:13.012702 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99"
Oct 02 10:55:13 crc kubenswrapper[4783]: E1002 10:55:13.013162 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:13.513146821 +0000 UTC m=+146.829341082 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 02 10:55:13 crc kubenswrapper[4783]: I1002 10:55:13.114463 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 02 10:55:13 crc kubenswrapper[4783]: E1002 10:55:13.114622 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:13.614596639 +0000 UTC m=+146.930790900 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 02 10:55:13 crc kubenswrapper[4783]: I1002 10:55:13.115175 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99"
Oct 02 10:55:13 crc kubenswrapper[4783]: E1002 10:55:13.115728 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:13.615710018 +0000 UTC m=+146.931904289 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 02 10:55:13 crc kubenswrapper[4783]: I1002 10:55:13.217659 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 02 10:55:13 crc kubenswrapper[4783]: E1002 10:55:13.217941 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:13.717891635 +0000 UTC m=+147.034085896 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 02 10:55:13 crc kubenswrapper[4783]: I1002 10:55:13.218253 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99"
Oct 02 10:55:13 crc kubenswrapper[4783]: E1002 10:55:13.219037 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:13.719028545 +0000 UTC m=+147.035222806 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 02 10:55:13 crc kubenswrapper[4783]: I1002 10:55:13.319801 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 02 10:55:13 crc kubenswrapper[4783]: E1002 10:55:13.320149 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:13.820121393 +0000 UTC m=+147.136315654 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 02 10:55:13 crc kubenswrapper[4783]: I1002 10:55:13.421327 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99"
Oct 02 10:55:13 crc kubenswrapper[4783]: E1002 10:55:13.421755 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:13.921741175 +0000 UTC m=+147.237935436 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 02 10:55:13 crc kubenswrapper[4783]: I1002 10:55:13.522617 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 02 10:55:13 crc kubenswrapper[4783]: E1002 10:55:13.522784 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:14.022758241 +0000 UTC m=+147.338952502 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 02 10:55:13 crc kubenswrapper[4783]: I1002 10:55:13.523571 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99"
Oct 02 10:55:13 crc kubenswrapper[4783]: E1002 10:55:13.524018 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:14.024005284 +0000 UTC m=+147.340199535 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 02 10:55:13 crc kubenswrapper[4783]: I1002 10:55:13.624732 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 02 10:55:13 crc kubenswrapper[4783]: E1002 10:55:13.625030 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:14.12501499 +0000 UTC m=+147.441209251 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 02 10:55:13 crc kubenswrapper[4783]: I1002 10:55:13.727459 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99"
Oct 02 10:55:13 crc kubenswrapper[4783]: E1002 10:55:13.728160 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:14.22809936 +0000 UTC m=+147.544293621 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 02 10:55:13 crc kubenswrapper[4783]: I1002 10:55:13.828816 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 02 10:55:13 crc kubenswrapper[4783]: E1002 10:55:13.829315 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:14.329294811 +0000 UTC m=+147.645489072 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 02 10:55:13 crc kubenswrapper[4783]: I1002 10:55:13.919968 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29323365-465ld" event={"ID":"17c756cd-c7ba-4efa-850d-7a9aff74099d","Type":"ContainerStarted","Data":"badf5a5e55be0f8401f5066102b4038840ae937fe4c96304cea25df9cbea7fe1"}
Oct 02 10:55:13 crc kubenswrapper[4783]: I1002 10:55:13.925017 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-x2ljx" event={"ID":"5b90e90f-8fb3-440c-b0d3-20f9582764ea","Type":"ContainerStarted","Data":"a74f859c404b08411c1d6039395f19fae8aaf9745443cc164b8bdf7508d9a5f3"}
Oct 02 10:55:13 crc kubenswrapper[4783]: I1002 10:55:13.930440 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99"
Oct 02 10:55:13 crc kubenswrapper[4783]: E1002 10:55:13.930897 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:14.430881172 +0000 UTC m=+147.747075433 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 02 10:55:13 crc kubenswrapper[4783]: I1002 10:55:13.933970 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncpvc" event={"ID":"1d0ded20-7af7-44b2-9bd8-4a6066faa4d5","Type":"ContainerStarted","Data":"6cced617806a43a7ff0a1347bb91f01ff152e2a6424714089c2dbbb68ae9b2ba"}
Oct 02 10:55:13 crc kubenswrapper[4783]: I1002 10:55:13.940272 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6rrvt" event={"ID":"daa53316-d347-4aaf-b33d-9b6f63757502","Type":"ContainerStarted","Data":"29b7f7c96b57a20dc773d12ce2911ee46b53c26e75af64c34577619ff7eafea7"}
Oct 02 10:55:13 crc kubenswrapper[4783]: I1002 10:55:13.941504 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-x4fgp" event={"ID":"02091d48-2a3f-4efc-a173-fc17fb2f3f9a","Type":"ContainerStarted","Data":"d0aa4439da842c5be5ced6f27ce2ce97ceebb6a673127b108b7d7d69eeb5092f"}
Oct 02 10:55:13 crc kubenswrapper[4783]: I1002 10:55:13.949670 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-x2ljx" podStartSLOduration=124.949653586 podStartE2EDuration="2m4.949653586s" podCreationTimestamp="2025-10-02 10:53:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:55:13.946932234 +0000 UTC m=+147.263126495" watchObservedRunningTime="2025-10-02 10:55:13.949653586 +0000 UTC m=+147.265847837"
Oct 02 10:55:13 crc kubenswrapper[4783]: I1002 10:55:13.949683 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-q7g9p" event={"ID":"6f6a533d-1bcb-409b-994e-e4ec71cffaeb","Type":"ContainerStarted","Data":"090fe25ee9b0bc21c0db2a68e62ab38364dc6c136e805531fc7552de70c50349"}
Oct 02 10:55:13 crc kubenswrapper[4783]: I1002 10:55:13.980355 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-cm245" event={"ID":"e435d9c9-4a33-4c0d-bb2b-84aa5e988124","Type":"ContainerStarted","Data":"ea7731e4b2a24e31af6511dbeb621a7e5e640f9fb5d1d008c8551f360de0d950"}
Oct 02 10:55:13 crc kubenswrapper[4783]: I1002 10:55:13.981326 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-cm245"
Oct 02 10:55:13 crc kubenswrapper[4783]: I1002 10:55:13.986700 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-rq6qg" event={"ID":"093341b6-a75f-4acc-82fe-cddcc303f189","Type":"ContainerStarted","Data":"86366d063af328aac535409455aac6cc6f58c8c3f2ef2b79b743ae54ade9c84d"}
Oct 02 10:55:13 crc kubenswrapper[4783]: I1002 10:55:13.988633 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-8mjk7" event={"ID":"a6d0e458-bd0f-402c-8cad-db6eba1e45ff","Type":"ContainerStarted","Data":"ed3d929f2294a6822dab2df9fd7a8ef07041c967edfedeb2df26bed22d8a8d12"}
Oct 02 10:55:13 crc kubenswrapper[4783]: I1002 10:55:13.989635 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-9m8m9" event={"ID":"0ee74d4b-8751-4089-a0e6-a99132950452","Type":"ContainerStarted","Data":"ba198ef00a691412055dc0eccd0355975075b6168dbde92977b82dd91b944445"}
Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.002317 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-js7h8" event={"ID":"33db5790-d9e7-4599-b8c7-7578ccb9940a","Type":"ContainerStarted","Data":"935468e0a1d37ca6b06001d233b470be91ba4df5a24fb086af99830a54c26530"}
Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.003474 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-cm245" podStartSLOduration=125.003462351 podStartE2EDuration="2m5.003462351s" podCreationTimestamp="2025-10-02 10:53:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:55:14.002561427 +0000 UTC m=+147.318755698" watchObservedRunningTime="2025-10-02 10:55:14.003462351 +0000 UTC m=+147.319656612"
Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.015732 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-9hj2c" event={"ID":"032450eb-6d0d-4cc2-90ee-c1ae7228f735","Type":"ContainerStarted","Data":"807ee77acda2aa560e14a6416013256176bb6885782b6a37d42a9d4a4f8da567"}
Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.026221 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-rq6qg" podStartSLOduration=7.026199138 podStartE2EDuration="7.026199138s" podCreationTimestamp="2025-10-02 10:55:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:55:14.024078223 +0000 UTC m=+147.340272484" watchObservedRunningTime="2025-10-02 10:55:14.026199138 +0000 UTC m=+147.342393399"
Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.033737 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 02 10:55:14 crc kubenswrapper[4783]: E1002 10:55:14.035224 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:14.535198635 +0000 UTC m=+147.851392896 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.036747 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8wv52" event={"ID":"b741b1e1-2c96-4a89-82eb-0a4106dfc6bb","Type":"ContainerStarted","Data":"c0e7fc40a6cc739f392c15a249b2a20c6a26116715f8aa1c3a51dcaf80496c16"} Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.042753 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-drj5j" event={"ID":"88a3511a-30a6-4aa3-99d9-e65e05aaefc8","Type":"ContainerStarted","Data":"004290d917cb30bb97fca83d08ed025c085569c74d0b31cc96b55aa8010fd681"} Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.044815 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7qsc9" event={"ID":"d507ecac-9761-4886-8acd-daff78a5d360","Type":"ContainerStarted","Data":"6f5718cfed7894840ac294bd4217d3e9e7200783fb8e563990626a77c999339b"} Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.051402 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-9hj2c" podStartSLOduration=124.051389131 podStartE2EDuration="2m4.051389131s" podCreationTimestamp="2025-10-02 10:53:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:55:14.049247754 +0000 UTC m=+147.365442015" watchObservedRunningTime="2025-10-02 10:55:14.051389131 +0000 UTC m=+147.367583382" Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.052260 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-x9scv" event={"ID":"12e7e4aa-75cb-41c1-8d03-8eea90096e8c","Type":"ContainerStarted","Data":"e0de10df4181dac5d30c0ffbe137d790b47f8a09b965011200e7e0ca0af995e9"} Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.053024 4783 patch_prober.go:28] interesting pod/console-operator-58897d9998-x9scv container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.24:8443/readyz\": dial tcp 10.217.0.24:8443: connect: connection refused" start-of-body= Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.053072 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-x9scv" podUID="12e7e4aa-75cb-41c1-8d03-8eea90096e8c" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.24:8443/readyz\": dial tcp 10.217.0.24:8443: connect: connection refused" Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.054641 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mktxk" event={"ID":"73cf839f-ec02-4db7-b640-e1c0783fafc9","Type":"ContainerStarted","Data":"e01f6ed88c41b755f2d2e93d9392faf1d1ca17e16e5c9ea5c7a56fcad1c8b747"} Oct 02 10:55:14 crc 
kubenswrapper[4783]: I1002 10:55:14.056315 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-9ppnb" event={"ID":"f2ba3d51-3014-4233-931b-adaa45c937dd","Type":"ContainerStarted","Data":"3d2282bfeca5433e10a8dbb29c7ec0b33e2dd5c318b2b2ec5d2e24bff35805f8"} Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.063682 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-ql8ns" event={"ID":"0c6fa8cc-e2f2-46a1-995d-96a9b76f01fb","Type":"ContainerStarted","Data":"9d1abba6668c492bf7f3960c61a98c4ed3620a4dde243fd8cb9a84b808836bc1"} Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.068885 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-2w77p" event={"ID":"9d972c88-073c-4259-bc37-b77d1c0a0bfe","Type":"ContainerStarted","Data":"74a87ffb72b98888324de32c8a3dc2346a4d9f6aeddd96db62df3bae1f64ef14"} Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.080464 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-zwxct" event={"ID":"7567048e-a0b0-46e5-b4bf-51180f84b884","Type":"ContainerStarted","Data":"90def1da982b47b736023c972936a4cfe03ffb4ba0658158999b0e18b2ebf722"} Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.084882 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xgcwj" event={"ID":"1b89cc5b-178f-4c08-b588-979dc2393bae","Type":"ContainerStarted","Data":"718de9da72a8f64e0dcc70c1b07a8c15730c05997de82dbe0f8a76b5135a3af0"} Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.088047 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-drj5j" podStartSLOduration=124.088025784 podStartE2EDuration="2m4.088025784s" podCreationTimestamp="2025-10-02 10:53:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:55:14.080132797 +0000 UTC m=+147.396327058" watchObservedRunningTime="2025-10-02 10:55:14.088025784 +0000 UTC m=+147.404220045" Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.098354 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-8vjpx" event={"ID":"9675dc45-ac16-4440-a4b1-2c3cfeff2459","Type":"ContainerStarted","Data":"787bd40bea119cd13e247d2a2d34da9330480f53c064ffe05b49d6cb36ef958b"} Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.098398 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-8vjpx" event={"ID":"9675dc45-ac16-4440-a4b1-2c3cfeff2459","Type":"ContainerStarted","Data":"c4f37fd80efb7c540e241700295b94c06d9ebc2f110c7a18cedb6973dd49cd21"} Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.102640 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-t4hbs" event={"ID":"2542c48f-ba7c-4077-9aaa-63fb5ddb0c1a","Type":"ContainerStarted","Data":"ae23746415208de54055979a53f80be22b804ae8eb2da7e6d6b66cb6e530e9b7"} Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.102695 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-t4hbs" 
event={"ID":"2542c48f-ba7c-4077-9aaa-63fb5ddb0c1a","Type":"ContainerStarted","Data":"7df6134ae32693aac7fef5384e86d0650f4a0f5867f4b6449397e0f5b14760fc"} Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.104154 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-r7v8v" event={"ID":"baa575d2-ecb0-4e64-b0fe-361fec92e555","Type":"ContainerStarted","Data":"e2af80275e763991e048e73ec3874b15e64c671e9d99b09f014a9159f4857b18"} Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.104875 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-r7v8v" Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.105863 4783 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-r7v8v container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.28:8443/healthz\": dial tcp 10.217.0.28:8443: connect: connection refused" start-of-body= Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.105898 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-r7v8v" podUID="baa575d2-ecb0-4e64-b0fe-361fec92e555" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.28:8443/healthz\": dial tcp 10.217.0.28:8443: connect: connection refused" Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.115881 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-s7hdh" event={"ID":"1577f6a5-596e-445c-995d-ffe75fe194b0","Type":"ContainerStarted","Data":"d5d16fc3731201030733c1744bb1ece4eb7e6b8a51d26d69c28e1ca3f42566dc"} Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.115923 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-s7hdh" event={"ID":"1577f6a5-596e-445c-995d-ffe75fe194b0","Type":"ContainerStarted","Data":"8c18865f15b8452d0caae91bf31af863ea2fe8519fa49a2df64c482d8135e83e"} Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.118074 4783 generic.go:334] "Generic (PLEG): container finished" podID="feedb992-610e-4ceb-84f1-7d5a005d7826" containerID="1133b9fb0cc38369c5f2a5fb557eeffc697ca1f1070f95df8308a67993427edc" exitCode=0 Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.118163 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-dzxd5" event={"ID":"feedb992-610e-4ceb-84f1-7d5a005d7826","Type":"ContainerDied","Data":"1133b9fb0cc38369c5f2a5fb557eeffc697ca1f1070f95df8308a67993427edc"} Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.133907 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8wv52" podStartSLOduration=124.133863499 podStartE2EDuration="2m4.133863499s" podCreationTimestamp="2025-10-02 10:53:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:55:14.105143214 +0000 UTC m=+147.421337475" watchObservedRunningTime="2025-10-02 10:55:14.133863499 +0000 UTC m=+147.450057760" Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.134162 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-xknld" 
event={"ID":"eb03ecb8-df6d-404e-b248-90ca5372ecf9","Type":"ContainerStarted","Data":"9fbf657ce56c4180030aba107450ee04675b66da6e278844c80a5fe93a0deb4b"} Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.135282 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-mktxk" podStartSLOduration=124.135273006 podStartE2EDuration="2m4.135273006s" podCreationTimestamp="2025-10-02 10:53:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:55:14.13314477 +0000 UTC m=+147.449339031" watchObservedRunningTime="2025-10-02 10:55:14.135273006 +0000 UTC m=+147.451467267" Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.135680 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:14 crc kubenswrapper[4783]: E1002 10:55:14.142077 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:14.642055355 +0000 UTC m=+147.958249826 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.146436 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-xlxdx" event={"ID":"50389666-bf4f-4442-a4cd-f3609994ce1b","Type":"ContainerStarted","Data":"5610e8c8773d92c251f695e5fe817167c8cfdbec33a4ac7fe1e36a6386918f62"} Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.164044 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-d2npj" event={"ID":"6670b373-8502-4db3-9f27-461eca66f043","Type":"ContainerStarted","Data":"16fc68cc943631a40b6503668e38c15dbb06df111c5011c973030ab6a2fe2446"} Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.164094 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-d2npj" event={"ID":"6670b373-8502-4db3-9f27-461eca66f043","Type":"ContainerStarted","Data":"8335881746bb5f18179974b0bc847224185f5611b98a66168bfab370fc1b3fb0"} Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.165306 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-ql8ns" podStartSLOduration=125.165286756 podStartE2EDuration="2m5.165286756s" podCreationTimestamp="2025-10-02 10:53:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 
10:55:14.162658516 +0000 UTC m=+147.478852777" watchObservedRunningTime="2025-10-02 10:55:14.165286756 +0000 UTC m=+147.481481007" Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.171736 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-lmvhj" event={"ID":"b9581e6c-3a17-4a11-bf2e-c24f90ececca","Type":"ContainerStarted","Data":"090dadbca71ed75bc7bd40ec0922a9737a41500158f65f1eccf572c6e973aee5"} Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.172894 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-9hxcf" event={"ID":"0ec0d7d6-9f6a-43cb-984b-4e162c07da70","Type":"ContainerStarted","Data":"431b77010ac29189c67f9f3c4a77c0a5013066c476e1aa88531546d1407542d1"} Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.173686 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-8544f" event={"ID":"b03dc3cf-caec-4c8a-86e3-db542117eef3","Type":"ContainerStarted","Data":"74d9a2192f0749c1500655da6a5fddfe29915d0c8a49181dcd24a60603c8a4d9"} Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.173706 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-8544f" event={"ID":"b03dc3cf-caec-4c8a-86e3-db542117eef3","Type":"ContainerStarted","Data":"336ff0882a69bcaed974d6ed502f99be94402409300ed6d67fb54961aab17b5d"} Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.174884 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-lqq4w" event={"ID":"6c1efff0-350e-4626-b854-d818e5b5f7f7","Type":"ContainerStarted","Data":"5dc38afe8894eafb389616b5fa907c1fbb17bdbe2af7ea49356695fec7629211"} Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.185928 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rfpcx" event={"ID":"a053c888-10fd-4b11-bc05-963d0a7ab8ad","Type":"ContainerStarted","Data":"fb1f9d19b9b216a2132346e16ae94a666418b8ff4aa8f2e93d4d5502fd9c4543"} Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.186378 4783 patch_prober.go:28] interesting pod/downloads-7954f5f757-r6pqv container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.186433 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-r6pqv" podUID="71af7827-a6d5-4a87-9839-996ab528213d" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.204128 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zbtp8" Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.223346 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-zwxct" podStartSLOduration=124.223319052 podStartE2EDuration="2m4.223319052s" podCreationTimestamp="2025-10-02 10:53:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 
00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:55:14.218572147 +0000 UTC m=+147.534766408" watchObservedRunningTime="2025-10-02 10:55:14.223319052 +0000 UTC m=+147.539513313" Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.239509 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.241751 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-r7v8v" podStartSLOduration=124.241712555 podStartE2EDuration="2m4.241712555s" podCreationTimestamp="2025-10-02 10:53:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:55:14.238702256 +0000 UTC m=+147.554896517" watchObservedRunningTime="2025-10-02 10:55:14.241712555 +0000 UTC m=+147.557906816" Oct 02 10:55:14 crc kubenswrapper[4783]: E1002 10:55:14.243325 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:14.743298687 +0000 UTC m=+148.059492948 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.319916 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.324443 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-d2npj" podStartSLOduration=124.324400859 podStartE2EDuration="2m4.324400859s" podCreationTimestamp="2025-10-02 10:53:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:55:14.314655353 +0000 UTC m=+147.630849614" watchObservedRunningTime="2025-10-02 10:55:14.324400859 +0000 UTC m=+147.640595120" Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.353608 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:14 crc kubenswrapper[4783]: E1002 10:55:14.355328 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-10-02 10:55:14.855316892 +0000 UTC m=+148.171511153 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.409394 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-s7hdh" podStartSLOduration=7.409379044 podStartE2EDuration="7.409379044s" podCreationTimestamp="2025-10-02 10:55:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:55:14.404944097 +0000 UTC m=+147.721138358" watchObservedRunningTime="2025-10-02 10:55:14.409379044 +0000 UTC m=+147.725573305" Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.443879 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-xlxdx" podStartSLOduration=124.44385284 podStartE2EDuration="2m4.44385284s" podCreationTimestamp="2025-10-02 10:53:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:55:14.430779547 +0000 UTC m=+147.746973808" watchObservedRunningTime="2025-10-02 10:55:14.44385284 +0000 UTC m=+147.760047101" Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.472574 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:55:14 crc kubenswrapper[4783]: E1002 10:55:14.472939 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:14.972923735 +0000 UTC m=+148.289117996 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.519787 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-xknld" podStartSLOduration=124.519759416 podStartE2EDuration="2m4.519759416s" podCreationTimestamp="2025-10-02 10:53:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:55:14.48491425 +0000 UTC m=+147.801108501" watchObservedRunningTime="2025-10-02 10:55:14.519759416 +0000 UTC m=+147.835953677" Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.563350 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-8544f" podStartSLOduration=124.563332812 podStartE2EDuration="2m4.563332812s" podCreationTimestamp="2025-10-02 10:53:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:55:14.5624923 +0000 UTC m=+147.878686561" watchObservedRunningTime="2025-10-02 10:55:14.563332812 +0000 UTC m=+147.879527073" Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.576554 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:14 crc kubenswrapper[4783]: E1002 10:55:14.576890 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:15.076877958 +0000 UTC m=+148.393072219 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.663234 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-zwxct" Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.672588 4783 patch_prober.go:28] interesting pod/router-default-5444994796-zwxct container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Oct 02 10:55:14 crc kubenswrapper[4783]: [-]has-synced failed: reason withheld Oct 02 10:55:14 crc kubenswrapper[4783]: [+]process-running ok Oct 02 10:55:14 crc kubenswrapper[4783]: healthz check failed Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.672648 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zwxct" podUID="7567048e-a0b0-46e5-b4bf-51180f84b884" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.697120 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:55:14 crc kubenswrapper[4783]: E1002 10:55:14.698032 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:15.198018303 +0000 UTC m=+148.514212564 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.798884 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:14 crc kubenswrapper[4783]: E1002 10:55:14.799455 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:15.299430879 +0000 UTC m=+148.615625140 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:14 crc kubenswrapper[4783]: I1002 10:55:14.900391 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:55:14 crc kubenswrapper[4783]: E1002 10:55:14.900812 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:15.400793984 +0000 UTC m=+148.716988245 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.002434 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:15 crc kubenswrapper[4783]: E1002 10:55:15.003271 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:15.503258368 +0000 UTC m=+148.819452629 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.104334 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:55:15 crc kubenswrapper[4783]: E1002 10:55:15.104741 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:15.604717226 +0000 UTC m=+148.920911487 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.190717 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-2w77p" event={"ID":"9d972c88-073c-4259-bc37-b77d1c0a0bfe","Type":"ContainerStarted","Data":"5a528329e69080613a1d9648f6675d2c1eaf864b764dcf6a59ba4101392318a2"} Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.192637 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rfpcx" event={"ID":"a053c888-10fd-4b11-bc05-963d0a7ab8ad","Type":"ContainerStarted","Data":"a8961be96ad5e0b235faf3820a5a916d9a6929fb20bb3eec93b7152beccacd5e"} Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.192661 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rfpcx" event={"ID":"a053c888-10fd-4b11-bc05-963d0a7ab8ad","Type":"ContainerStarted","Data":"7409e2d0d32e7337812d98e956b6720370022632b7ad38313d32e277697de1fc"} Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.192998 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rfpcx" Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.194061 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-9m8m9" event={"ID":"0ee74d4b-8751-4089-a0e6-a99132950452","Type":"ContainerStarted","Data":"17bdd6b10b3f81492f606d04352c3424b9758baeb4c932999b64291886629a1f"} Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.195951 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6rrvt" 
event={"ID":"daa53316-d347-4aaf-b33d-9b6f63757502","Type":"ContainerStarted","Data":"60470b8140cc161180da30d3f86d6f9e98aa61e4db0e1fa464dce6b3b68115bf"} Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.195989 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6rrvt" event={"ID":"daa53316-d347-4aaf-b33d-9b6f63757502","Type":"ContainerStarted","Data":"937b275ab9e50de7fdc801c74914957fef0008a2db22f333a9bcb8c2860b5f98"} Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.198383 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-dzxd5" event={"ID":"feedb992-610e-4ceb-84f1-7d5a005d7826","Type":"ContainerStarted","Data":"404a9354a8b656264aafaada6b75c4b8b70c2af3f5c504d89862925ccd28f266"} Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.202205 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xgcwj" event={"ID":"1b89cc5b-178f-4c08-b588-979dc2393bae","Type":"ContainerStarted","Data":"92b6605683a695dfae0a900125c12be9bc8e05ed8c99041b9cb8266fc52103ef"} Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.202240 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xgcwj" event={"ID":"1b89cc5b-178f-4c08-b588-979dc2393bae","Type":"ContainerStarted","Data":"4aa9bf23c22f29b5c7247bcb1679965fcf2750d69d515f6ebca8dec9c8949e6f"} Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.205183 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:15 crc kubenswrapper[4783]: E1002 10:55:15.205471 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:15.705460555 +0000 UTC m=+149.021654816 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.205904 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-t4hbs" event={"ID":"2542c48f-ba7c-4077-9aaa-63fb5ddb0c1a","Type":"ContainerStarted","Data":"b0fd7dd1f9830b5f19eee9a93fcdc6fd0bf31460da02fef39f6445143000aa7a"} Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.207995 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-hw769" event={"ID":"a1ba5a71-7a04-4446-8459-1748799af4db","Type":"ContainerStarted","Data":"afb7550faee125d8e15b1096d3a63b86d0cbb2b758019fb9bc9515a9924bf68e"} Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.215211 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-x4fgp" event={"ID":"02091d48-2a3f-4efc-a173-fc17fb2f3f9a","Type":"ContainerStarted","Data":"961541805eab9bc2b9b08f2f330caa43cc42eaa862ccaf571bfbb3cdd22a982c"} Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.217085 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-q7g9p" event={"ID":"6f6a533d-1bcb-409b-994e-e4ec71cffaeb","Type":"ContainerStarted","Data":"11fe1576caa1746ba83e7ae6aa62b7f9f97aec6d6810ff990c60f7092a935171"} Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.217680 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-q7g9p" Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.226087 4783 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-q7g9p container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.35:8080/healthz\": dial tcp 10.217.0.35:8080: connect: connection refused" start-of-body= Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.226145 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-q7g9p" podUID="6f6a533d-1bcb-409b-994e-e4ec71cffaeb" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.35:8080/healthz\": dial tcp 10.217.0.35:8080: connect: connection refused" Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.232151 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-8vjpx" event={"ID":"9675dc45-ac16-4440-a4b1-2c3cfeff2459","Type":"ContainerStarted","Data":"5e0503375b27067cb7b1f4f4718d48bbcce0e26d1ccff3f819cb37dfb94cdf10"} Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.232462 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-8vjpx" Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.236031 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncpvc" 
event={"ID":"1d0ded20-7af7-44b2-9bd8-4a6066faa4d5","Type":"ContainerStarted","Data":"ba86a8fadd7493b64eeb5d17b4e95c1fc9331e0662eb100661288d1c4c4cb01f"} Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.236792 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncpvc" Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.238992 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-9hxcf" event={"ID":"0ec0d7d6-9f6a-43cb-984b-4e162c07da70","Type":"ContainerStarted","Data":"d7723b0fe85be1aeb949f314b101170174038849fd93d9649ab0757f53a9ccf2"} Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.246804 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-lqq4w" event={"ID":"6c1efff0-350e-4626-b854-d818e5b5f7f7","Type":"ContainerStarted","Data":"dc1fa8e08a2115d4cb33be6505af9abd344363e09f9f29c77c2277c63c463f21"} Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.249527 4783 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-ncpvc container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.21:8443/healthz\": dial tcp 10.217.0.21:8443: connect: connection refused" start-of-body= Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.249564 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncpvc" podUID="1d0ded20-7af7-44b2-9bd8-4a6066faa4d5" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.21:8443/healthz\": dial tcp 10.217.0.21:8443: connect: connection refused" Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.263128 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-8mjk7" event={"ID":"a6d0e458-bd0f-402c-8cad-db6eba1e45ff","Type":"ContainerStarted","Data":"ed15b80ef58fe116129ccecdf8a60f12544231c0b1aa68855adeaec8b33b5527"} Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.281787 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-lmvhj" event={"ID":"b9581e6c-3a17-4a11-bf2e-c24f90ececca","Type":"ContainerStarted","Data":"6abb9b0fd2a77c90650ec337c1fdbdefff6af5f515aac0e69f4ef3a819553c37"} Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.281832 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-lmvhj" event={"ID":"b9581e6c-3a17-4a11-bf2e-c24f90ececca","Type":"ContainerStarted","Data":"8364ca20704e58961a545c153b26d86639b945e17f42d0b4d19b09c8fb80f120"} Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.305973 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:55:15 crc kubenswrapper[4783]: E1002 10:55:15.306258 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-10-02 10:55:15.806241145 +0000 UTC m=+149.122435406 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.306551 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:15 crc kubenswrapper[4783]: E1002 10:55:15.308010 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:15.808001951 +0000 UTC m=+149.124196212 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.310270 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7qsc9" event={"ID":"d507ecac-9761-4886-8acd-daff78a5d360","Type":"ContainerStarted","Data":"fa096538fd7495003cca03de74f5aab93ecbba6343f7492de639c2490aa25f49"} Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.310692 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7qsc9" Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.326574 4783 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-7qsc9 container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.39:5443/healthz\": dial tcp 10.217.0.39:5443: connect: connection refused" start-of-body= Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.326643 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7qsc9" podUID="d507ecac-9761-4886-8acd-daff78a5d360" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.39:5443/healthz\": dial tcp 10.217.0.39:5443: connect: connection refused" Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.329163 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-js7h8" event={"ID":"33db5790-d9e7-4599-b8c7-7578ccb9940a","Type":"ContainerStarted","Data":"acfdbbbcd1b5441463bf11424d1c129063df874782f2044534f774c798741001"} Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.330218 4783 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-js7h8" Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.331447 4783 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-js7h8 container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.22:8443/healthz\": dial tcp 10.217.0.22:8443: connect: connection refused" start-of-body= Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.331476 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-js7h8" podUID="33db5790-d9e7-4599-b8c7-7578ccb9940a" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.22:8443/healthz\": dial tcp 10.217.0.22:8443: connect: connection refused" Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.337316 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29323365-465ld" event={"ID":"17c756cd-c7ba-4efa-850d-7a9aff74099d","Type":"ContainerStarted","Data":"98458d31b71ea30aaf4b018b587b8b2dcd58993f2eff3826b846d75a89ca6613"} Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.338372 4783 patch_prober.go:28] interesting pod/downloads-7954f5f757-r6pqv container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.338527 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-r6pqv" podUID="71af7827-a6d5-4a87-9839-996ab528213d" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.347188 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-2w77p" podStartSLOduration=125.347169751 podStartE2EDuration="2m5.347169751s" podCreationTimestamp="2025-10-02 10:53:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:55:15.331698414 +0000 UTC m=+148.647892675" watchObservedRunningTime="2025-10-02 10:55:15.347169751 +0000 UTC m=+148.663364012" Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.360492 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-r7v8v" Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.407206 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:55:15 crc kubenswrapper[4783]: E1002 10:55:15.407432 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-10-02 10:55:15.907388225 +0000 UTC m=+149.223582496 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.408064 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:15 crc kubenswrapper[4783]: E1002 10:55:15.413584 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:15.913553337 +0000 UTC m=+149.229747598 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.506327 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-x4fgp" podStartSLOduration=125.506306086 podStartE2EDuration="2m5.506306086s" podCreationTimestamp="2025-10-02 10:53:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:55:15.50609254 +0000 UTC m=+148.822286801" watchObservedRunningTime="2025-10-02 10:55:15.506306086 +0000 UTC m=+148.822500347" Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.509542 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:55:15 crc kubenswrapper[4783]: E1002 10:55:15.512117 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:16.012100498 +0000 UTC m=+149.328294759 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.613247 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:15 crc kubenswrapper[4783]: E1002 10:55:15.613641 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:16.113629438 +0000 UTC m=+149.429823699 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.648498 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-js7h8" podStartSLOduration=125.648476394 podStartE2EDuration="2m5.648476394s" podCreationTimestamp="2025-10-02 10:53:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:55:15.581974445 +0000 UTC m=+148.898168726" watchObservedRunningTime="2025-10-02 10:55:15.648476394 +0000 UTC m=+148.964670675" Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.648764 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-xgcwj" podStartSLOduration=125.648757681 podStartE2EDuration="2m5.648757681s" podCreationTimestamp="2025-10-02 10:53:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:55:15.643367079 +0000 UTC m=+148.959561340" watchObservedRunningTime="2025-10-02 10:55:15.648757681 +0000 UTC m=+148.964951942" Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.679624 4783 patch_prober.go:28] interesting pod/router-default-5444994796-zwxct container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Oct 02 10:55:15 crc kubenswrapper[4783]: [-]has-synced failed: reason withheld Oct 02 10:55:15 crc kubenswrapper[4783]: [+]process-running ok Oct 02 10:55:15 crc kubenswrapper[4783]: healthz check failed Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.679678 4783 prober.go:107] "Probe failed" probeType="Startup" 
pod="openshift-ingress/router-default-5444994796-zwxct" podUID="7567048e-a0b0-46e5-b4bf-51180f84b884" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 02 10:55:15 crc kubenswrapper[4783]: E1002 10:55:15.714280 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:16.214264534 +0000 UTC m=+149.530458795 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.714323 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.714924 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:15 crc kubenswrapper[4783]: E1002 10:55:15.715234 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:16.215225889 +0000 UTC m=+149.531420150 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.792728 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-t4hbs" podStartSLOduration=125.792712056 podStartE2EDuration="2m5.792712056s" podCreationTimestamp="2025-10-02 10:53:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:55:15.713591496 +0000 UTC m=+149.029785777" watchObservedRunningTime="2025-10-02 10:55:15.792712056 +0000 UTC m=+149.108906317" Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.793546 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncpvc" podStartSLOduration=125.793542398 podStartE2EDuration="2m5.793542398s" podCreationTimestamp="2025-10-02 10:53:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:55:15.785501767 +0000 UTC m=+149.101696028" watchObservedRunningTime="2025-10-02 10:55:15.793542398 +0000 UTC m=+149.109736659" Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.815612 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:55:15 crc kubenswrapper[4783]: E1002 10:55:15.815774 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:16.315751722 +0000 UTC m=+149.631945983 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.816237 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:15 crc kubenswrapper[4783]: E1002 10:55:15.816551 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:16.316544063 +0000 UTC m=+149.632738324 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.906702 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-hw769" podStartSLOduration=125.906687623 podStartE2EDuration="2m5.906687623s" podCreationTimestamp="2025-10-02 10:53:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:55:15.903487459 +0000 UTC m=+149.219681720" watchObservedRunningTime="2025-10-02 10:55:15.906687623 +0000 UTC m=+149.222881884" Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.907315 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-q7g9p" podStartSLOduration=125.90731103 podStartE2EDuration="2m5.90731103s" podCreationTimestamp="2025-10-02 10:53:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:55:15.84913961 +0000 UTC m=+149.165333871" watchObservedRunningTime="2025-10-02 10:55:15.90731103 +0000 UTC m=+149.223505281" Oct 02 10:55:15 crc kubenswrapper[4783]: I1002 10:55:15.917843 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:55:15 crc kubenswrapper[4783]: E1002 10:55:15.918269 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b 
nodeName:}" failed. No retries permitted until 2025-10-02 10:55:16.418251897 +0000 UTC m=+149.734446158 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:16 crc kubenswrapper[4783]: I1002 10:55:16.019542 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:16 crc kubenswrapper[4783]: E1002 10:55:16.019819 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:16.519807988 +0000 UTC m=+149.836002249 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:16 crc kubenswrapper[4783]: I1002 10:55:16.030682 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-lmvhj" podStartSLOduration=127.030664123 podStartE2EDuration="2m7.030664123s" podCreationTimestamp="2025-10-02 10:53:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:55:15.967077941 +0000 UTC m=+149.283272192" watchObservedRunningTime="2025-10-02 10:55:16.030664123 +0000 UTC m=+149.346858384" Oct 02 10:55:16 crc kubenswrapper[4783]: I1002 10:55:16.031563 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-8mjk7" podStartSLOduration=126.031558017 podStartE2EDuration="2m6.031558017s" podCreationTimestamp="2025-10-02 10:53:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:55:16.017693582 +0000 UTC m=+149.333887843" watchObservedRunningTime="2025-10-02 10:55:16.031558017 +0000 UTC m=+149.347752278" Oct 02 10:55:16 crc kubenswrapper[4783]: I1002 10:55:16.074888 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rfpcx" podStartSLOduration=126.074874556 podStartE2EDuration="2m6.074874556s" podCreationTimestamp="2025-10-02 10:53:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 
10:55:16.073599512 +0000 UTC m=+149.389793773" watchObservedRunningTime="2025-10-02 10:55:16.074874556 +0000 UTC m=+149.391068817" Oct 02 10:55:16 crc kubenswrapper[4783]: I1002 10:55:16.120373 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:55:16 crc kubenswrapper[4783]: E1002 10:55:16.120752 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:16.620737452 +0000 UTC m=+149.936931713 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:16 crc kubenswrapper[4783]: I1002 10:55:16.124704 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-9m8m9" podStartSLOduration=126.124691896 podStartE2EDuration="2m6.124691896s" podCreationTimestamp="2025-10-02 10:53:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:55:16.121943963 +0000 UTC m=+149.438138224" watchObservedRunningTime="2025-10-02 10:55:16.124691896 +0000 UTC m=+149.440886157" Oct 02 10:55:16 crc kubenswrapper[4783]: I1002 10:55:16.197636 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-8vjpx" podStartSLOduration=8.197616993 podStartE2EDuration="8.197616993s" podCreationTimestamp="2025-10-02 10:55:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:55:16.162036977 +0000 UTC m=+149.478231238" watchObservedRunningTime="2025-10-02 10:55:16.197616993 +0000 UTC m=+149.513811254" Oct 02 10:55:16 crc kubenswrapper[4783]: I1002 10:55:16.221460 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:16 crc kubenswrapper[4783]: E1002 10:55:16.221775 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:16.721764948 +0000 UTC m=+150.037959209 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:16 crc kubenswrapper[4783]: I1002 10:55:16.226310 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-9hxcf" podStartSLOduration=126.226296747 podStartE2EDuration="2m6.226296747s" podCreationTimestamp="2025-10-02 10:53:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:55:16.199746599 +0000 UTC m=+149.515940860" watchObservedRunningTime="2025-10-02 10:55:16.226296747 +0000 UTC m=+149.542490998" Oct 02 10:55:16 crc kubenswrapper[4783]: I1002 10:55:16.226474 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-lqq4w" podStartSLOduration=126.226469912 podStartE2EDuration="2m6.226469912s" podCreationTimestamp="2025-10-02 10:53:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:55:16.224271524 +0000 UTC m=+149.540465785" watchObservedRunningTime="2025-10-02 10:55:16.226469912 +0000 UTC m=+149.542664173" Oct 02 10:55:16 crc kubenswrapper[4783]: I1002 10:55:16.265789 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7qsc9" podStartSLOduration=126.265774625 podStartE2EDuration="2m6.265774625s" podCreationTimestamp="2025-10-02 10:53:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:55:16.265406665 +0000 UTC m=+149.581600926" watchObservedRunningTime="2025-10-02 10:55:16.265774625 +0000 UTC m=+149.581968886" Oct 02 10:55:16 crc kubenswrapper[4783]: I1002 10:55:16.296034 4783 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-cm245 container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get \"https://10.217.0.25:8443/healthz\": dial tcp 10.217.0.25:8443: connect: connection refused" start-of-body= Oct 02 10:55:16 crc kubenswrapper[4783]: I1002 10:55:16.296080 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-cm245" podUID="e435d9c9-4a33-4c0d-bb2b-84aa5e988124" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.25:8443/healthz\": dial tcp 10.217.0.25:8443: connect: connection refused" Oct 02 10:55:16 crc kubenswrapper[4783]: I1002 10:55:16.296287 4783 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-cm245 container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.25:8443/healthz\": dial tcp 10.217.0.25:8443: connect: connection refused" start-of-body= Oct 02 10:55:16 crc kubenswrapper[4783]: I1002 10:55:16.296302 4783 prober.go:107] "Probe failed" probeType="Readiness" 
pod="openshift-config-operator/openshift-config-operator-7777fb866f-cm245" podUID="e435d9c9-4a33-4c0d-bb2b-84aa5e988124" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.25:8443/healthz\": dial tcp 10.217.0.25:8443: connect: connection refused" Oct 02 10:55:16 crc kubenswrapper[4783]: I1002 10:55:16.312645 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-6rrvt" podStartSLOduration=126.312630617 podStartE2EDuration="2m6.312630617s" podCreationTimestamp="2025-10-02 10:53:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:55:16.310399289 +0000 UTC m=+149.626593550" watchObservedRunningTime="2025-10-02 10:55:16.312630617 +0000 UTC m=+149.628824878" Oct 02 10:55:16 crc kubenswrapper[4783]: I1002 10:55:16.322870 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:55:16 crc kubenswrapper[4783]: E1002 10:55:16.323191 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:16.823177975 +0000 UTC m=+150.139372226 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:16 crc kubenswrapper[4783]: I1002 10:55:16.338938 4783 patch_prober.go:28] interesting pod/console-operator-58897d9998-x9scv container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.24:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 02 10:55:16 crc kubenswrapper[4783]: I1002 10:55:16.339068 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-x9scv" podUID="12e7e4aa-75cb-41c1-8d03-8eea90096e8c" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.24:8443/readyz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Oct 02 10:55:16 crc kubenswrapper[4783]: I1002 10:55:16.345933 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-9ppnb" event={"ID":"f2ba3d51-3014-4233-931b-adaa45c937dd","Type":"ContainerStarted","Data":"ef8e071f49f095c676d19e237402010de9b76b4f68f927f39a270ae0e820ab86"} Oct 02 10:55:16 crc kubenswrapper[4783]: I1002 10:55:16.350788 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-dzxd5" 
event={"ID":"feedb992-610e-4ceb-84f1-7d5a005d7826","Type":"ContainerStarted","Data":"6cd979d4023e7812cdad0eb098e509dfe14e382ff2ebb0b42d67e1978ec67c9d"} Oct 02 10:55:16 crc kubenswrapper[4783]: I1002 10:55:16.351854 4783 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-ncpvc container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.21:8443/healthz\": dial tcp 10.217.0.21:8443: connect: connection refused" start-of-body= Oct 02 10:55:16 crc kubenswrapper[4783]: I1002 10:55:16.351890 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncpvc" podUID="1d0ded20-7af7-44b2-9bd8-4a6066faa4d5" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.21:8443/healthz\": dial tcp 10.217.0.21:8443: connect: connection refused" Oct 02 10:55:16 crc kubenswrapper[4783]: I1002 10:55:16.354026 4783 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-q7g9p container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.35:8080/healthz\": dial tcp 10.217.0.35:8080: connect: connection refused" start-of-body= Oct 02 10:55:16 crc kubenswrapper[4783]: I1002 10:55:16.354084 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-q7g9p" podUID="6f6a533d-1bcb-409b-994e-e4ec71cffaeb" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.35:8080/healthz\": dial tcp 10.217.0.35:8080: connect: connection refused" Oct 02 10:55:16 crc kubenswrapper[4783]: I1002 10:55:16.355018 4783 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-js7h8 container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.22:8443/healthz\": dial tcp 10.217.0.22:8443: connect: connection refused" start-of-body= Oct 02 10:55:16 crc kubenswrapper[4783]: I1002 10:55:16.355043 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-js7h8" podUID="33db5790-d9e7-4599-b8c7-7578ccb9940a" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.22:8443/healthz\": dial tcp 10.217.0.22:8443: connect: connection refused" Oct 02 10:55:16 crc kubenswrapper[4783]: I1002 10:55:16.355672 4783 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-7qsc9 container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.39:5443/healthz\": dial tcp 10.217.0.39:5443: connect: connection refused" start-of-body= Oct 02 10:55:16 crc kubenswrapper[4783]: I1002 10:55:16.355915 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7qsc9" podUID="d507ecac-9761-4886-8acd-daff78a5d360" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.39:5443/healthz\": dial tcp 10.217.0.39:5443: connect: connection refused" Oct 02 10:55:16 crc kubenswrapper[4783]: I1002 10:55:16.356250 4783 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-cm245 container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.25:8443/healthz\": dial tcp 10.217.0.25:8443: connect: connection refused" start-of-body= 
Oct 02 10:55:16 crc kubenswrapper[4783]: I1002 10:55:16.356360 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-cm245" podUID="e435d9c9-4a33-4c0d-bb2b-84aa5e988124" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.25:8443/healthz\": dial tcp 10.217.0.25:8443: connect: connection refused" Oct 02 10:55:16 crc kubenswrapper[4783]: I1002 10:55:16.403711 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29323365-465ld" podStartSLOduration=127.403696042 podStartE2EDuration="2m7.403696042s" podCreationTimestamp="2025-10-02 10:53:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:55:16.401147385 +0000 UTC m=+149.717341646" watchObservedRunningTime="2025-10-02 10:55:16.403696042 +0000 UTC m=+149.719890303" Oct 02 10:55:16 crc kubenswrapper[4783]: I1002 10:55:16.426321 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:16 crc kubenswrapper[4783]: I1002 10:55:16.426674 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:55:16 crc kubenswrapper[4783]: I1002 10:55:16.426948 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:55:16 crc kubenswrapper[4783]: E1002 10:55:16.427521 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:16.927505748 +0000 UTC m=+150.243700009 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:16 crc kubenswrapper[4783]: I1002 10:55:16.442576 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:55:16 crc kubenswrapper[4783]: I1002 10:55:16.472391 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:55:16 crc kubenswrapper[4783]: I1002 10:55:16.528029 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:55:16 crc kubenswrapper[4783]: I1002 10:55:16.528172 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 02 10:55:16 crc kubenswrapper[4783]: I1002 10:55:16.528242 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 02 10:55:16 crc kubenswrapper[4783]: E1002 10:55:16.528763 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:17.02838297 +0000 UTC m=+150.344577231 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:16 crc kubenswrapper[4783]: I1002 10:55:16.532319 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 02 10:55:16 crc kubenswrapper[4783]: I1002 10:55:16.536069 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 02 10:55:16 crc kubenswrapper[4783]: I1002 10:55:16.561313 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Oct 02 10:55:16 crc kubenswrapper[4783]: I1002 10:55:16.568347 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 02 10:55:16 crc kubenswrapper[4783]: I1002 10:55:16.576578 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Oct 02 10:55:16 crc kubenswrapper[4783]: I1002 10:55:16.631220 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:16 crc kubenswrapper[4783]: E1002 10:55:16.631578 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:17.131556493 +0000 UTC m=+150.447750754 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:16 crc kubenswrapper[4783]: I1002 10:55:16.670732 4783 patch_prober.go:28] interesting pod/router-default-5444994796-zwxct container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Oct 02 10:55:16 crc kubenswrapper[4783]: [-]has-synced failed: reason withheld Oct 02 10:55:16 crc kubenswrapper[4783]: [+]process-running ok Oct 02 10:55:16 crc kubenswrapper[4783]: healthz check failed Oct 02 10:55:16 crc kubenswrapper[4783]: I1002 10:55:16.671069 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zwxct" podUID="7567048e-a0b0-46e5-b4bf-51180f84b884" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 02 10:55:16 crc kubenswrapper[4783]: I1002 10:55:16.732899 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:55:16 crc kubenswrapper[4783]: E1002 10:55:16.733066 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:17.233042062 +0000 UTC m=+150.549236323 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:16 crc kubenswrapper[4783]: I1002 10:55:16.733687 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:16 crc kubenswrapper[4783]: E1002 10:55:16.734196 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:17.234183322 +0000 UTC m=+150.550377573 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:16 crc kubenswrapper[4783]: I1002 10:55:16.834750 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:55:16 crc kubenswrapper[4783]: E1002 10:55:16.835298 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:17.33527454 +0000 UTC m=+150.651468791 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:16 crc kubenswrapper[4783]: I1002 10:55:16.936693 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:16 crc kubenswrapper[4783]: E1002 10:55:16.937222 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:17.43720952 +0000 UTC m=+150.753403781 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:17 crc kubenswrapper[4783]: I1002 10:55:17.037508 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:55:17 crc kubenswrapper[4783]: E1002 10:55:17.038920 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:17.538904714 +0000 UTC m=+150.855098975 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:17 crc kubenswrapper[4783]: I1002 10:55:17.151449 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:17 crc kubenswrapper[4783]: E1002 10:55:17.151753 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:17.651738731 +0000 UTC m=+150.967932992 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:17 crc kubenswrapper[4783]: I1002 10:55:17.253107 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:55:17 crc kubenswrapper[4783]: E1002 10:55:17.253279 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:17.75325186 +0000 UTC m=+151.069446111 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:17 crc kubenswrapper[4783]: I1002 10:55:17.253325 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:17 crc kubenswrapper[4783]: E1002 10:55:17.255675 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:17.755663193 +0000 UTC m=+151.071857454 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:17 crc kubenswrapper[4783]: I1002 10:55:17.354531 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:55:17 crc kubenswrapper[4783]: E1002 10:55:17.354883 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:17.854870112 +0000 UTC m=+151.171064373 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:17 crc kubenswrapper[4783]: I1002 10:55:17.363753 4783 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-q7g9p container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.35:8080/healthz\": dial tcp 10.217.0.35:8080: connect: connection refused" start-of-body= Oct 02 10:55:17 crc kubenswrapper[4783]: I1002 10:55:17.363796 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-q7g9p" podUID="6f6a533d-1bcb-409b-994e-e4ec71cffaeb" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.35:8080/healthz\": dial tcp 10.217.0.35:8080: connect: connection refused" Oct 02 10:55:17 crc kubenswrapper[4783]: I1002 10:55:17.398914 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-js7h8" Oct 02 10:55:17 crc kubenswrapper[4783]: I1002 10:55:17.405448 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-ncpvc" Oct 02 10:55:17 crc kubenswrapper[4783]: I1002 10:55:17.420389 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-dzxd5" podStartSLOduration=128.420369494 podStartE2EDuration="2m8.420369494s" podCreationTimestamp="2025-10-02 10:53:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:55:16.584003313 +0000 UTC m=+149.900197584" watchObservedRunningTime="2025-10-02 10:55:17.420369494 +0000 UTC m=+150.736563755" Oct 02 10:55:17 crc kubenswrapper[4783]: I1002 10:55:17.456160 4783 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:17 crc kubenswrapper[4783]: E1002 10:55:17.459711 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:17.959695408 +0000 UTC m=+151.275889669 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:17 crc kubenswrapper[4783]: I1002 10:55:17.557523 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:55:17 crc kubenswrapper[4783]: E1002 10:55:17.557672 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:18.057649254 +0000 UTC m=+151.373843515 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:17 crc kubenswrapper[4783]: I1002 10:55:17.557776 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:17 crc kubenswrapper[4783]: E1002 10:55:17.558078 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:18.058066605 +0000 UTC m=+151.374260866 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:17 crc kubenswrapper[4783]: I1002 10:55:17.658867 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:55:17 crc kubenswrapper[4783]: E1002 10:55:17.659175 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:18.159160803 +0000 UTC m=+151.475355064 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:17 crc kubenswrapper[4783]: I1002 10:55:17.679351 4783 patch_prober.go:28] interesting pod/router-default-5444994796-zwxct container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Oct 02 10:55:17 crc kubenswrapper[4783]: [-]has-synced failed: reason withheld Oct 02 10:55:17 crc kubenswrapper[4783]: [+]process-running ok Oct 02 10:55:17 crc kubenswrapper[4783]: healthz check failed Oct 02 10:55:17 crc kubenswrapper[4783]: I1002 10:55:17.679468 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zwxct" podUID="7567048e-a0b0-46e5-b4bf-51180f84b884" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 02 10:55:17 crc kubenswrapper[4783]: I1002 10:55:17.760683 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:17 crc kubenswrapper[4783]: E1002 10:55:17.761057 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:18.261041842 +0000 UTC m=+151.577236103 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
[condensed: the identical UnmountVolume.TearDown (pod 8f668bae-612b-4b75-9490-919e737c6a3b) / MountVolume.MountDevice (pod image-registry-697d97f7c8-shp99) failure pair recurs on each ~100ms reconciler pass from 10:55:17.861 through 10:55:18.275, each retry rescheduled with durationBeforeRetry 500ms; duplicate records omitted]
Oct 02 10:55:18 crc kubenswrapper[4783]: I1002 10:55:18.363326 4783 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-7qsc9 container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.39:5443/healthz\": context deadline exceeded" start-of-body=
Oct 02 10:55:18 crc kubenswrapper[4783]: I1002 10:55:18.363844 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7qsc9" podUID="d507ecac-9761-4886-8acd-daff78a5d360" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.39:5443/healthz\": context deadline exceeded"
Oct 02 10:55:18 crc kubenswrapper[4783]: I1002 10:55:18.367956 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"b776b34735169faf95871fbe48d5d6fa63ede58922f22c67f66f0a7a3d0e56c5"}
Oct 02 10:55:18 crc kubenswrapper[4783]: I1002 10:55:18.368927 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"71c60adeec3f43093b63384b387f4a400a16bed69f4b9ac2d98693ac03e8118f"}
Oct 02 10:55:18 crc kubenswrapper[4783]: I1002 10:55:18.370947 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"d36e6e7944b2da9ac29e07de1eaa27f6469fc092d2fae8999efa8281b9b17305"}
Oct 02 10:55:18 crc kubenswrapper[4783]: I1002 10:55:18.376032 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99"
Oct 02 10:55:18 crc kubenswrapper[4783]: E1002 10:55:18.376509 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:18.876494244 +0000 UTC m=+152.192688505 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
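
[annotation — CSI driver registration] The mount/unmount loop above fails because the kubelet's in-memory CSI plugin registry has no entry named kubevirt.io.hostpath-provisioner: the driver's node plugin has not (yet) registered itself over the kubelet plugin-registration socket, so every attach and teardown attempt aborts before reaching the driver. Below is a minimal, illustrative Go sketch of the two checks an operator would typically run; it assumes a reachable kubeconfig in $KUBECONFIG and the default registration directory /var/lib/kubelet/plugins_registry, and it is not the kubelet's own code.

    package main

    import (
    	"context"
    	"fmt"
    	"os"

    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes"
    	"k8s.io/client-go/tools/clientcmd"
    )

    func main() {
    	// API-side view: which CSIDriver objects exist in the cluster.
    	cfg, err := clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG"))
    	if err != nil {
    		panic(err)
    	}
    	cs, err := kubernetes.NewForConfig(cfg)
    	if err != nil {
    		panic(err)
    	}
    	drivers, err := cs.StorageV1().CSIDrivers().List(context.TODO(), metav1.ListOptions{})
    	if err != nil {
    		panic(err)
    	}
    	for _, d := range drivers.Items {
    		fmt.Println("CSIDriver object:", d.Name)
    	}

    	// Node-side view: which node plugins actually registered with this
    	// kubelet. The failing driver must expose a socket here before the
    	// "not found in the list of registered CSI drivers" error can clear.
    	entries, err := os.ReadDir("/var/lib/kubelet/plugins_registry")
    	if err != nil {
    		fmt.Println("cannot read plugin registry dir:", err)
    		return
    	}
    	for _, e := range entries {
    		fmt.Println("registered plugin socket:", e.Name())
    	}
    }
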
[condensed: the same retry pair recurs at 10:55:18.477 and 10:55:18.579; duplicate records omitted]
Oct 02 10:55:18 crc kubenswrapper[4783]: I1002 10:55:18.675056 4783 patch_prober.go:28] interesting pod/router-default-5444994796-zwxct container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Oct 02 10:55:18 crc kubenswrapper[4783]: [-]has-synced failed: reason withheld
Oct 02 10:55:18 crc kubenswrapper[4783]: [+]process-running ok
Oct 02 10:55:18 crc kubenswrapper[4783]: healthz check failed
Oct 02 10:55:18 crc kubenswrapper[4783]: I1002 10:55:18.675573 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zwxct" podUID="7567048e-a0b0-46e5-b4bf-51180f84b884" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
[condensed: the retry pair recurs at 10:55:18.680, 10:55:18.782, 10:55:18.884 and 10:55:18.985; duplicate records omitted]
Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.027931 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-97zw7"]
Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.028849 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-97zw7"
Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.036060 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g"
Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.087352 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99"
Oct 02 10:55:19 crc kubenswrapper[4783]: E1002 10:55:19.087464 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:19.587452638 +0000 UTC m=+152.903646899 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
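
[annotation — retry backoff] The "No retries permitted until ... (durationBeforeRetry 500ms)" bookkeeping in these records is the volume manager refusing to re-run a failed operation before its backoff deadline; the records here all show the 500ms base, and the kubelet grows that duration for repeated failures of the same operation. The sketch below is a rough illustration of that gate only, with all values assumed, and is not the kubelet's nestedpendingoperations implementation.

    package main

    import (
    	"errors"
    	"fmt"
    	"time"
    )

    // pendingOp mimics the "no retries permitted until" gate seen in the
    // log: a failed operation records a deadline (now + backoff) and any
    // attempt before that deadline is rejected outright.
    type pendingOp struct {
    	notBefore time.Time
    	backoff   time.Duration
    }

    func (p *pendingOp) tryRun(op func() error) error {
    	if time.Now().Before(p.notBefore) {
    		return fmt.Errorf("no retries permitted until %s", p.notBefore.Format(time.RFC3339Nano))
    	}
    	if err := op(); err != nil {
    		p.notBefore = time.Now().Add(p.backoff) // schedule the next retry window
    		return err
    	}
    	return nil
    }

    func main() {
    	p := &pendingOp{backoff: 500 * time.Millisecond}
    	mount := func() error { return errors.New("driver not registered") }
    	for i := 0; i < 3; i++ {
    		fmt.Println(p.tryRun(mount))
    		time.Sleep(200 * time.Millisecond) // a reconciler pass between attempts
    	}
    }
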
Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.116490 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-97zw7"]
Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.187962 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.188365 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kpzxw\" (UniqueName: \"kubernetes.io/projected/b241b260-2dbd-4383-8a28-b8728e86a605-kube-api-access-kpzxw\") pod \"certified-operators-97zw7\" (UID: \"b241b260-2dbd-4383-8a28-b8728e86a605\") " pod="openshift-marketplace/certified-operators-97zw7"
Oct 02 10:55:19 crc kubenswrapper[4783]: E1002 10:55:19.188511 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:19.688496465 +0000 UTC m=+153.004690726 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.188420 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b241b260-2dbd-4383-8a28-b8728e86a605-catalog-content\") pod \"certified-operators-97zw7\" (UID: \"b241b260-2dbd-4383-8a28-b8728e86a605\") " pod="openshift-marketplace/certified-operators-97zw7"
Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.188584 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b241b260-2dbd-4383-8a28-b8728e86a605-utilities\") pod \"certified-operators-97zw7\" (UID: \"b241b260-2dbd-4383-8a28-b8728e86a605\") " pod="openshift-marketplace/certified-operators-97zw7"
Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.233074 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-cs782"]
Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.233918 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-cs782"
Oct 02 10:55:19 crc kubenswrapper[4783]: W1002 10:55:19.237018 4783 reflector.go:561] object-"openshift-marketplace"/"community-operators-dockercfg-dmngl": failed to list *v1.Secret: secrets "community-operators-dockercfg-dmngl" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-marketplace": no relationship found between node 'crc' and this object
Oct 02 10:55:19 crc kubenswrapper[4783]: E1002 10:55:19.237059 4783 reflector.go:158] "Unhandled Error" err="object-\"openshift-marketplace\"/\"community-operators-dockercfg-dmngl\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"community-operators-dockercfg-dmngl\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-marketplace\": no relationship found between node 'crc' and this object" logger="UnhandledError"
Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.290045 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b241b260-2dbd-4383-8a28-b8728e86a605-catalog-content\") pod \"certified-operators-97zw7\" (UID: \"b241b260-2dbd-4383-8a28-b8728e86a605\") " pod="openshift-marketplace/certified-operators-97zw7"
Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.290098 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b241b260-2dbd-4383-8a28-b8728e86a605-utilities\") pod \"certified-operators-97zw7\" (UID: \"b241b260-2dbd-4383-8a28-b8728e86a605\") " pod="openshift-marketplace/certified-operators-97zw7"
Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.290154 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kpzxw\" (UniqueName: \"kubernetes.io/projected/b241b260-2dbd-4383-8a28-b8728e86a605-kube-api-access-kpzxw\") pod \"certified-operators-97zw7\" (UID: \"b241b260-2dbd-4383-8a28-b8728e86a605\") " pod="openshift-marketplace/certified-operators-97zw7"
Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.290195 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99"
Oct 02 10:55:19 crc kubenswrapper[4783]: E1002 10:55:19.290473 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:19.790461356 +0000 UTC m=+153.106655617 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
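
[annotation — node authorizer] The reflector warnings above are the node authorizer at work: a kubelet may only read a secret once a pod bound to its node references it, and "no relationship found between node 'crc' and this object" means no such pod exists yet, so the broad list/watch is denied. The sketch below, purely illustrative and assuming node credentials in $KUBECONFIG, reproduces the distinction between the denied list and the per-object get:

    package main

    import (
    	"context"
    	"fmt"
    	"os"

    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/kubernetes"
    	"k8s.io/client-go/tools/clientcmd"
    )

    func main() {
    	cfg, err := clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG"))
    	if err != nil {
    		panic(err)
    	}
    	cs, err := kubernetes.NewForConfig(cfg)
    	if err != nil {
    		panic(err)
    	}
    	ns := "openshift-marketplace"

    	// A broad LIST is denied for node credentials: the node authorizer
    	// only grants access to individual secrets referenced by pods that
    	// are already bound to this node.
    	if _, err := cs.CoreV1().Secrets(ns).List(context.TODO(), metav1.ListOptions{}); err != nil {
    		fmt.Println("list denied as expected:", err)
    	}

    	// A targeted GET succeeds once a pod on the node mounts the secret;
    	// until then it fails with the same "no relationship" reason.
    	s, err := cs.CoreV1().Secrets(ns).Get(context.TODO(), "community-operators-dockercfg-dmngl", metav1.GetOptions{})
    	if err != nil {
    		fmt.Println("get:", err)
    		return
    	}
    	fmt.Println("got secret:", s.Name)
    }
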
Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.290909 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b241b260-2dbd-4383-8a28-b8728e86a605-catalog-content\") pod \"certified-operators-97zw7\" (UID: \"b241b260-2dbd-4383-8a28-b8728e86a605\") " pod="openshift-marketplace/certified-operators-97zw7"
Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.291119 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b241b260-2dbd-4383-8a28-b8728e86a605-utilities\") pod \"certified-operators-97zw7\" (UID: \"b241b260-2dbd-4383-8a28-b8728e86a605\") " pod="openshift-marketplace/certified-operators-97zw7"
Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.299836 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-cs782"]
Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.307591 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-cm245"
Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.344317 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kpzxw\" (UniqueName: \"kubernetes.io/projected/b241b260-2dbd-4383-8a28-b8728e86a605-kube-api-access-kpzxw\") pod \"certified-operators-97zw7\" (UID: \"b241b260-2dbd-4383-8a28-b8728e86a605\") " pod="openshift-marketplace/certified-operators-97zw7"
Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.375483 4783 generic.go:334] "Generic (PLEG): container finished" podID="17c756cd-c7ba-4efa-850d-7a9aff74099d" containerID="98458d31b71ea30aaf4b018b587b8b2dcd58993f2eff3826b846d75a89ca6613" exitCode=0
Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.375558 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29323365-465ld" event={"ID":"17c756cd-c7ba-4efa-850d-7a9aff74099d","Type":"ContainerDied","Data":"98458d31b71ea30aaf4b018b587b8b2dcd58993f2eff3826b846d75a89ca6613"}
Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.377952 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"8224273f55dcb509de12f551d0c91bfc21effd88b1259962588d58c6a4a8521a"}
Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.379634 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"9d4dd7a560b1b7cf9c7592d1546e2c21223fbbd2e21ead1cd65b57e95b436205"}
Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.381291 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c"
event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"b8755bdf52ac50dca3bf4f90654f3166943cf3eb69aed4564eed421b935b73b2"} Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.381603 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.391030 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.391210 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fds57\" (UniqueName: \"kubernetes.io/projected/4e629356-d5af-454b-8451-c651ccf13b32-kube-api-access-fds57\") pod \"community-operators-cs782\" (UID: \"4e629356-d5af-454b-8451-c651ccf13b32\") " pod="openshift-marketplace/community-operators-cs782" Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.391241 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e629356-d5af-454b-8451-c651ccf13b32-catalog-content\") pod \"community-operators-cs782\" (UID: \"4e629356-d5af-454b-8451-c651ccf13b32\") " pod="openshift-marketplace/community-operators-cs782" Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.391294 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e629356-d5af-454b-8451-c651ccf13b32-utilities\") pod \"community-operators-cs782\" (UID: \"4e629356-d5af-454b-8451-c651ccf13b32\") " pod="openshift-marketplace/community-operators-cs782" Oct 02 10:55:19 crc kubenswrapper[4783]: E1002 10:55:19.391642 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:19.891625596 +0000 UTC m=+153.207819857 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.416854 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-zxxz4"] Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.456423 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-zxxz4" Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.471908 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zxxz4"] Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.497382 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e629356-d5af-454b-8451-c651ccf13b32-utilities\") pod \"community-operators-cs782\" (UID: \"4e629356-d5af-454b-8451-c651ccf13b32\") " pod="openshift-marketplace/community-operators-cs782" Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.497477 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fds57\" (UniqueName: \"kubernetes.io/projected/4e629356-d5af-454b-8451-c651ccf13b32-kube-api-access-fds57\") pod \"community-operators-cs782\" (UID: \"4e629356-d5af-454b-8451-c651ccf13b32\") " pod="openshift-marketplace/community-operators-cs782" Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.497514 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e629356-d5af-454b-8451-c651ccf13b32-catalog-content\") pod \"community-operators-cs782\" (UID: \"4e629356-d5af-454b-8451-c651ccf13b32\") " pod="openshift-marketplace/community-operators-cs782" Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.497536 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:19 crc kubenswrapper[4783]: E1002 10:55:19.497799 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:19.997787907 +0000 UTC m=+153.313982168 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.499910 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e629356-d5af-454b-8451-c651ccf13b32-utilities\") pod \"community-operators-cs782\" (UID: \"4e629356-d5af-454b-8451-c651ccf13b32\") " pod="openshift-marketplace/community-operators-cs782" Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.501253 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e629356-d5af-454b-8451-c651ccf13b32-catalog-content\") pod \"community-operators-cs782\" (UID: \"4e629356-d5af-454b-8451-c651ccf13b32\") " pod="openshift-marketplace/community-operators-cs782" Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.571237 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fds57\" (UniqueName: \"kubernetes.io/projected/4e629356-d5af-454b-8451-c651ccf13b32-kube-api-access-fds57\") pod \"community-operators-cs782\" (UID: \"4e629356-d5af-454b-8451-c651ccf13b32\") " pod="openshift-marketplace/community-operators-cs782" Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.599980 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.600451 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/31f92601-c54c-4f96-88db-35fe1673fe40-catalog-content\") pod \"certified-operators-zxxz4\" (UID: \"31f92601-c54c-4f96-88db-35fe1673fe40\") " pod="openshift-marketplace/certified-operators-zxxz4" Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.600590 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/31f92601-c54c-4f96-88db-35fe1673fe40-utilities\") pod \"certified-operators-zxxz4\" (UID: \"31f92601-c54c-4f96-88db-35fe1673fe40\") " pod="openshift-marketplace/certified-operators-zxxz4" Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.600713 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bzslp\" (UniqueName: \"kubernetes.io/projected/31f92601-c54c-4f96-88db-35fe1673fe40-kube-api-access-bzslp\") pod \"certified-operators-zxxz4\" (UID: \"31f92601-c54c-4f96-88db-35fe1673fe40\") " pod="openshift-marketplace/certified-operators-zxxz4" Oct 02 10:55:19 crc kubenswrapper[4783]: E1002 10:55:19.600906 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-10-02 10:55:20.100890658 +0000 UTC m=+153.417084919 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.635590 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-cbwgv"] Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.636709 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-cbwgv" Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.642822 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-97zw7" Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.671898 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-cbwgv"] Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.677472 4783 patch_prober.go:28] interesting pod/router-default-5444994796-zwxct container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Oct 02 10:55:19 crc kubenswrapper[4783]: [-]has-synced failed: reason withheld Oct 02 10:55:19 crc kubenswrapper[4783]: [+]process-running ok Oct 02 10:55:19 crc kubenswrapper[4783]: healthz check failed Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.677818 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zwxct" podUID="7567048e-a0b0-46e5-b4bf-51180f84b884" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.701511 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/37f50271-72da-49ab-972e-ef5cb30020cc-utilities\") pod \"community-operators-cbwgv\" (UID: \"37f50271-72da-49ab-972e-ef5cb30020cc\") " pod="openshift-marketplace/community-operators-cbwgv" Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.701547 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/31f92601-c54c-4f96-88db-35fe1673fe40-catalog-content\") pod \"certified-operators-zxxz4\" (UID: \"31f92601-c54c-4f96-88db-35fe1673fe40\") " pod="openshift-marketplace/certified-operators-zxxz4" Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.701573 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ljrl2\" (UniqueName: \"kubernetes.io/projected/37f50271-72da-49ab-972e-ef5cb30020cc-kube-api-access-ljrl2\") pod \"community-operators-cbwgv\" (UID: \"37f50271-72da-49ab-972e-ef5cb30020cc\") " pod="openshift-marketplace/community-operators-cbwgv" Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.701615 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.701647 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/31f92601-c54c-4f96-88db-35fe1673fe40-utilities\") pod \"certified-operators-zxxz4\" (UID: \"31f92601-c54c-4f96-88db-35fe1673fe40\") " pod="openshift-marketplace/certified-operators-zxxz4" Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.701680 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bzslp\" (UniqueName: \"kubernetes.io/projected/31f92601-c54c-4f96-88db-35fe1673fe40-kube-api-access-bzslp\") pod \"certified-operators-zxxz4\" (UID: \"31f92601-c54c-4f96-88db-35fe1673fe40\") " pod="openshift-marketplace/certified-operators-zxxz4" Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.701718 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/37f50271-72da-49ab-972e-ef5cb30020cc-catalog-content\") pod \"community-operators-cbwgv\" (UID: \"37f50271-72da-49ab-972e-ef5cb30020cc\") " pod="openshift-marketplace/community-operators-cbwgv" Oct 02 10:55:19 crc kubenswrapper[4783]: E1002 10:55:19.701977 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:20.201966046 +0000 UTC m=+153.518160307 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.702219 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/31f92601-c54c-4f96-88db-35fe1673fe40-catalog-content\") pod \"certified-operators-zxxz4\" (UID: \"31f92601-c54c-4f96-88db-35fe1673fe40\") " pod="openshift-marketplace/certified-operators-zxxz4" Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.702445 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/31f92601-c54c-4f96-88db-35fe1673fe40-utilities\") pod \"certified-operators-zxxz4\" (UID: \"31f92601-c54c-4f96-88db-35fe1673fe40\") " pod="openshift-marketplace/certified-operators-zxxz4" Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.766197 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.767181 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.768000 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bzslp\" (UniqueName: \"kubernetes.io/projected/31f92601-c54c-4f96-88db-35fe1673fe40-kube-api-access-bzslp\") pod \"certified-operators-zxxz4\" (UID: \"31f92601-c54c-4f96-88db-35fe1673fe40\") " pod="openshift-marketplace/certified-operators-zxxz4" Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.785011 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zxxz4" Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.799547 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.805661 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.806205 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.806611 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/37f50271-72da-49ab-972e-ef5cb30020cc-catalog-content\") pod \"community-operators-cbwgv\" (UID: \"37f50271-72da-49ab-972e-ef5cb30020cc\") " pod="openshift-marketplace/community-operators-cbwgv" Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.806717 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/37f50271-72da-49ab-972e-ef5cb30020cc-utilities\") pod \"community-operators-cbwgv\" (UID: \"37f50271-72da-49ab-972e-ef5cb30020cc\") " pod="openshift-marketplace/community-operators-cbwgv" Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.806811 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ljrl2\" (UniqueName: \"kubernetes.io/projected/37f50271-72da-49ab-972e-ef5cb30020cc-kube-api-access-ljrl2\") pod \"community-operators-cbwgv\" (UID: \"37f50271-72da-49ab-972e-ef5cb30020cc\") " pod="openshift-marketplace/community-operators-cbwgv" Oct 02 10:55:19 crc kubenswrapper[4783]: E1002 10:55:19.807192 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:20.307176882 +0000 UTC m=+153.623371143 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.807844 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/37f50271-72da-49ab-972e-ef5cb30020cc-catalog-content\") pod \"community-operators-cbwgv\" (UID: \"37f50271-72da-49ab-972e-ef5cb30020cc\") " pod="openshift-marketplace/community-operators-cbwgv" Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.808192 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/37f50271-72da-49ab-972e-ef5cb30020cc-utilities\") pod \"community-operators-cbwgv\" (UID: \"37f50271-72da-49ab-972e-ef5cb30020cc\") " pod="openshift-marketplace/community-operators-cbwgv" Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.808343 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.880006 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ljrl2\" (UniqueName: \"kubernetes.io/projected/37f50271-72da-49ab-972e-ef5cb30020cc-kube-api-access-ljrl2\") pod \"community-operators-cbwgv\" (UID: \"37f50271-72da-49ab-972e-ef5cb30020cc\") " pod="openshift-marketplace/community-operators-cbwgv" Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.908962 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.909312 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/42f92d75-c0d2-4935-a411-6c892019bde0-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"42f92d75-c0d2-4935-a411-6c892019bde0\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Oct 02 10:55:19 crc kubenswrapper[4783]: I1002 10:55:19.909581 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/42f92d75-c0d2-4935-a411-6c892019bde0-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"42f92d75-c0d2-4935-a411-6c892019bde0\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Oct 02 10:55:19 crc kubenswrapper[4783]: E1002 10:55:19.909669 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:20.409653197 +0000 UTC m=+153.725847458 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:20 crc kubenswrapper[4783]: I1002 10:55:20.011958 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:55:20 crc kubenswrapper[4783]: I1002 10:55:20.012210 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/42f92d75-c0d2-4935-a411-6c892019bde0-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"42f92d75-c0d2-4935-a411-6c892019bde0\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Oct 02 10:55:20 crc kubenswrapper[4783]: I1002 10:55:20.012296 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/42f92d75-c0d2-4935-a411-6c892019bde0-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"42f92d75-c0d2-4935-a411-6c892019bde0\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Oct 02 10:55:20 crc kubenswrapper[4783]: I1002 10:55:20.012505 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/42f92d75-c0d2-4935-a411-6c892019bde0-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"42f92d75-c0d2-4935-a411-6c892019bde0\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Oct 02 10:55:20 crc kubenswrapper[4783]: E1002 10:55:20.012627 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:20.512610484 +0000 UTC m=+153.828804745 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:20 crc kubenswrapper[4783]: I1002 10:55:20.034871 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-hw769" Oct 02 10:55:20 crc kubenswrapper[4783]: I1002 10:55:20.035481 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-hw769" Oct 02 10:55:20 crc kubenswrapper[4783]: I1002 10:55:20.037011 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/42f92d75-c0d2-4935-a411-6c892019bde0-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"42f92d75-c0d2-4935-a411-6c892019bde0\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Oct 02 10:55:20 crc kubenswrapper[4783]: I1002 10:55:20.046742 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-hw769" Oct 02 10:55:20 crc kubenswrapper[4783]: I1002 10:55:20.093990 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Oct 02 10:55:20 crc kubenswrapper[4783]: I1002 10:55:20.113556 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:20 crc kubenswrapper[4783]: E1002 10:55:20.114071 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:20.614060611 +0000 UTC m=+153.930254872 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:20 crc kubenswrapper[4783]: I1002 10:55:20.209168 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-vjcp4" Oct 02 10:55:20 crc kubenswrapper[4783]: I1002 10:55:20.210294 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-vjcp4" Oct 02 10:55:20 crc kubenswrapper[4783]: I1002 10:55:20.211846 4783 patch_prober.go:28] interesting pod/console-f9d7485db-vjcp4 container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.32:8443/health\": dial tcp 10.217.0.32:8443: connect: connection refused" start-of-body= Oct 02 10:55:20 crc kubenswrapper[4783]: I1002 10:55:20.211902 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-vjcp4" podUID="ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7" containerName="console" probeResult="failure" output="Get \"https://10.217.0.32:8443/health\": dial tcp 10.217.0.32:8443: connect: connection refused" Oct 02 10:55:20 crc kubenswrapper[4783]: I1002 10:55:20.215968 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:55:20 crc kubenswrapper[4783]: E1002 10:55:20.216233 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:20.716219337 +0000 UTC m=+154.032413598 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:20 crc kubenswrapper[4783]: I1002 10:55:20.282967 4783 patch_prober.go:28] interesting pod/downloads-7954f5f757-r6pqv container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Oct 02 10:55:20 crc kubenswrapper[4783]: I1002 10:55:20.283016 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-r6pqv" podUID="71af7827-a6d5-4a87-9839-996ab528213d" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Oct 02 10:55:20 crc kubenswrapper[4783]: I1002 10:55:20.283303 4783 patch_prober.go:28] interesting pod/downloads-7954f5f757-r6pqv container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Oct 02 10:55:20 crc kubenswrapper[4783]: I1002 10:55:20.283318 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-r6pqv" podUID="71af7827-a6d5-4a87-9839-996ab528213d" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Oct 02 10:55:20 crc kubenswrapper[4783]: I1002 10:55:20.317527 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:20 crc kubenswrapper[4783]: E1002 10:55:20.317887 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:20.81787437 +0000 UTC m=+154.134068631 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:20 crc kubenswrapper[4783]: I1002 10:55:20.402358 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-9ppnb" event={"ID":"f2ba3d51-3014-4233-931b-adaa45c937dd","Type":"ContainerStarted","Data":"64c4f21e24b570dea290b00f3fa2a2395926a2fb7f7934943b7d25abaf406c44"} Oct 02 10:55:20 crc kubenswrapper[4783]: I1002 10:55:20.404772 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-dzxd5" Oct 02 10:55:20 crc kubenswrapper[4783]: I1002 10:55:20.404807 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-dzxd5" Oct 02 10:55:20 crc kubenswrapper[4783]: I1002 10:55:20.422076 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:55:20 crc kubenswrapper[4783]: E1002 10:55:20.422397 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:20.922368658 +0000 UTC m=+154.238562919 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:20 crc kubenswrapper[4783]: I1002 10:55:20.431947 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-hw769" Oct 02 10:55:20 crc kubenswrapper[4783]: I1002 10:55:20.451923 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-x9scv" Oct 02 10:55:20 crc kubenswrapper[4783]: I1002 10:55:20.493900 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-97zw7"] Oct 02 10:55:20 crc kubenswrapper[4783]: I1002 10:55:20.523205 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:20 crc kubenswrapper[4783]: E1002 10:55:20.525651 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:21.025633673 +0000 UTC m=+154.341828014 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:20 crc kubenswrapper[4783]: W1002 10:55:20.538491 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb241b260_2dbd_4383_8a28_b8728e86a605.slice/crio-8189f44038888d6d66622e27d62c0b48887ae4e96458df2ec310aed22d0ab21e WatchSource:0}: Error finding container 8189f44038888d6d66622e27d62c0b48887ae4e96458df2ec310aed22d0ab21e: Status 404 returned error can't find the container with id 8189f44038888d6d66622e27d62c0b48887ae4e96458df2ec310aed22d0ab21e Oct 02 10:55:20 crc kubenswrapper[4783]: I1002 10:55:20.627470 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:55:20 crc kubenswrapper[4783]: E1002 10:55:20.627863 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2025-10-02 10:55:21.127845941 +0000 UTC m=+154.444040202 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:20 crc kubenswrapper[4783]: I1002 10:55:20.669511 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-zwxct" Oct 02 10:55:20 crc kubenswrapper[4783]: I1002 10:55:20.685549 4783 patch_prober.go:28] interesting pod/router-default-5444994796-zwxct container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Oct 02 10:55:20 crc kubenswrapper[4783]: [-]has-synced failed: reason withheld Oct 02 10:55:20 crc kubenswrapper[4783]: [+]process-running ok Oct 02 10:55:20 crc kubenswrapper[4783]: healthz check failed Oct 02 10:55:20 crc kubenswrapper[4783]: I1002 10:55:20.685600 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zwxct" podUID="7567048e-a0b0-46e5-b4bf-51180f84b884" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 02 10:55:20 crc kubenswrapper[4783]: I1002 10:55:20.735892 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:20 crc kubenswrapper[4783]: E1002 10:55:20.737769 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:21.237754991 +0000 UTC m=+154.553949252 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:20 crc kubenswrapper[4783]: I1002 10:55:20.837952 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Oct 02 10:55:20 crc kubenswrapper[4783]: I1002 10:55:20.838857 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:55:20 crc kubenswrapper[4783]: E1002 10:55:20.839172 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:21.339157927 +0000 UTC m=+154.655352188 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:20 crc kubenswrapper[4783]: I1002 10:55:20.844814 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-cbwgv" Oct 02 10:55:20 crc kubenswrapper[4783]: I1002 10:55:20.845328 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-cs782" Oct 02 10:55:20 crc kubenswrapper[4783]: I1002 10:55:20.940131 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:20 crc kubenswrapper[4783]: E1002 10:55:20.940596 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:21.440584854 +0000 UTC m=+154.756779115 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:20 crc kubenswrapper[4783]: I1002 10:55:20.984553 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-zxxz4"] Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.046038 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:55:21 crc kubenswrapper[4783]: E1002 10:55:21.046313 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:21.546298353 +0000 UTC m=+154.862492614 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.077332 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.101799 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-q7g9p" Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.120648 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-7qsc9" Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.147281 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:21 crc kubenswrapper[4783]: E1002 10:55:21.147578 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:21.647567136 +0000 UTC m=+154.963761397 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.218640 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29323365-465ld" Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.219539 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-h7sxv"] Oct 02 10:55:21 crc kubenswrapper[4783]: E1002 10:55:21.219746 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17c756cd-c7ba-4efa-850d-7a9aff74099d" containerName="collect-profiles" Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.219757 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="17c756cd-c7ba-4efa-850d-7a9aff74099d" containerName="collect-profiles" Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.219847 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="17c756cd-c7ba-4efa-850d-7a9aff74099d" containerName="collect-profiles" Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.224842 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-h7sxv" Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.227053 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.247574 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-h7sxv"] Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.247957 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:55:21 crc kubenswrapper[4783]: E1002 10:55:21.248631 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:21.748585202 +0000 UTC m=+155.064779463 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.353965 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mvr7d\" (UniqueName: \"kubernetes.io/projected/17c756cd-c7ba-4efa-850d-7a9aff74099d-kube-api-access-mvr7d\") pod \"17c756cd-c7ba-4efa-850d-7a9aff74099d\" (UID: \"17c756cd-c7ba-4efa-850d-7a9aff74099d\") " Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.354328 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/17c756cd-c7ba-4efa-850d-7a9aff74099d-config-volume\") pod \"17c756cd-c7ba-4efa-850d-7a9aff74099d\" (UID: \"17c756cd-c7ba-4efa-850d-7a9aff74099d\") " Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.354508 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/17c756cd-c7ba-4efa-850d-7a9aff74099d-secret-volume\") pod \"17c756cd-c7ba-4efa-850d-7a9aff74099d\" (UID: \"17c756cd-c7ba-4efa-850d-7a9aff74099d\") " Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.354726 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wzvc2\" (UniqueName: \"kubernetes.io/projected/df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850-kube-api-access-wzvc2\") pod \"redhat-marketplace-h7sxv\" (UID: \"df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850\") " pod="openshift-marketplace/redhat-marketplace-h7sxv" Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.354785 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.354808 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850-catalog-content\") pod \"redhat-marketplace-h7sxv\" (UID: \"df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850\") " pod="openshift-marketplace/redhat-marketplace-h7sxv" Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.354847 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850-utilities\") pod \"redhat-marketplace-h7sxv\" (UID: \"df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850\") " pod="openshift-marketplace/redhat-marketplace-h7sxv" Oct 02 10:55:21 crc kubenswrapper[4783]: E1002 10:55:21.355856 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-10-02 10:55:21.855839453 +0000 UTC m=+155.172033714 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.356059 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/17c756cd-c7ba-4efa-850d-7a9aff74099d-config-volume" (OuterVolumeSpecName: "config-volume") pod "17c756cd-c7ba-4efa-850d-7a9aff74099d" (UID: "17c756cd-c7ba-4efa-850d-7a9aff74099d"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.387633 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/17c756cd-c7ba-4efa-850d-7a9aff74099d-kube-api-access-mvr7d" (OuterVolumeSpecName: "kube-api-access-mvr7d") pod "17c756cd-c7ba-4efa-850d-7a9aff74099d" (UID: "17c756cd-c7ba-4efa-850d-7a9aff74099d"). InnerVolumeSpecName "kube-api-access-mvr7d". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.392968 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/17c756cd-c7ba-4efa-850d-7a9aff74099d-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "17c756cd-c7ba-4efa-850d-7a9aff74099d" (UID: "17c756cd-c7ba-4efa-850d-7a9aff74099d"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.415088 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29323365-465ld" event={"ID":"17c756cd-c7ba-4efa-850d-7a9aff74099d","Type":"ContainerDied","Data":"badf5a5e55be0f8401f5066102b4038840ae937fe4c96304cea25df9cbea7fe1"} Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.415125 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="badf5a5e55be0f8401f5066102b4038840ae937fe4c96304cea25df9cbea7fe1" Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.415197 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29323365-465ld" Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.423627 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-97zw7" event={"ID":"b241b260-2dbd-4383-8a28-b8728e86a605","Type":"ContainerStarted","Data":"d251e1971762f9daf7d73081d159191af52cb4d4c0c3619be4963b16eef178f7"} Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.423670 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-97zw7" event={"ID":"b241b260-2dbd-4383-8a28-b8728e86a605","Type":"ContainerStarted","Data":"8189f44038888d6d66622e27d62c0b48887ae4e96458df2ec310aed22d0ab21e"} Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.427165 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zxxz4" event={"ID":"31f92601-c54c-4f96-88db-35fe1673fe40","Type":"ContainerStarted","Data":"87f2e19a0146e325ee2e3327a00d0aa6448bc913dd149e4e3b3987ffcce503d5"} Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.429436 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"42f92d75-c0d2-4935-a411-6c892019bde0","Type":"ContainerStarted","Data":"f4fbcff6fc8aeadab8d0e0e19fca659a95af1bccdfb80af000f44aa31d55761e"} Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.455428 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:55:21 crc kubenswrapper[4783]: E1002 10:55:21.455669 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:21.955616266 +0000 UTC m=+155.271810527 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.455845 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850-utilities\") pod \"redhat-marketplace-h7sxv\" (UID: \"df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850\") " pod="openshift-marketplace/redhat-marketplace-h7sxv" Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.456070 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wzvc2\" (UniqueName: \"kubernetes.io/projected/df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850-kube-api-access-wzvc2\") pod \"redhat-marketplace-h7sxv\" (UID: \"df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850\") " pod="openshift-marketplace/redhat-marketplace-h7sxv" Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.456208 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.456241 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850-catalog-content\") pod \"redhat-marketplace-h7sxv\" (UID: \"df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850\") " pod="openshift-marketplace/redhat-marketplace-h7sxv" Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.456322 4783 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/17c756cd-c7ba-4efa-850d-7a9aff74099d-secret-volume\") on node \"crc\" DevicePath \"\"" Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.456351 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mvr7d\" (UniqueName: \"kubernetes.io/projected/17c756cd-c7ba-4efa-850d-7a9aff74099d-kube-api-access-mvr7d\") on node \"crc\" DevicePath \"\"" Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.456362 4783 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/17c756cd-c7ba-4efa-850d-7a9aff74099d-config-volume\") on node \"crc\" DevicePath \"\"" Oct 02 10:55:21 crc kubenswrapper[4783]: E1002 10:55:21.456586 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:21.956568751 +0000 UTC m=+155.272763012 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.456624 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850-catalog-content\") pod \"redhat-marketplace-h7sxv\" (UID: \"df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850\") " pod="openshift-marketplace/redhat-marketplace-h7sxv" Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.456803 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850-utilities\") pod \"redhat-marketplace-h7sxv\" (UID: \"df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850\") " pod="openshift-marketplace/redhat-marketplace-h7sxv" Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.484013 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wzvc2\" (UniqueName: \"kubernetes.io/projected/df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850-kube-api-access-wzvc2\") pod \"redhat-marketplace-h7sxv\" (UID: \"df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850\") " pod="openshift-marketplace/redhat-marketplace-h7sxv" Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.514579 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.514960 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.530710 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-cbwgv"] Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.557761 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:55:21 crc kubenswrapper[4783]: E1002 10:55:21.557917 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:22.057894366 +0000 UTC m=+155.374088627 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.558114 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:21 crc kubenswrapper[4783]: E1002 10:55:21.559159 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:22.059151339 +0000 UTC m=+155.375345600 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.568537 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-h7sxv" Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.583977 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-cs782"] Oct 02 10:55:21 crc kubenswrapper[4783]: W1002 10:55:21.596713 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4e629356_d5af_454b_8451_c651ccf13b32.slice/crio-e0aa1b93c904f50cb41610b881fe0382e37ff0a8d6e80caebd54fc7547dd22b2 WatchSource:0}: Error finding container e0aa1b93c904f50cb41610b881fe0382e37ff0a8d6e80caebd54fc7547dd22b2: Status 404 returned error can't find the container with id e0aa1b93c904f50cb41610b881fe0382e37ff0a8d6e80caebd54fc7547dd22b2 Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.610559 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-4ntdb"] Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.612995 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4ntdb" Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.616266 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4ntdb"] Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.671863 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:55:21 crc kubenswrapper[4783]: E1002 10:55:21.672225 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:22.172209011 +0000 UTC m=+155.488403262 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.674587 4783 patch_prober.go:28] interesting pod/router-default-5444994796-zwxct container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Oct 02 10:55:21 crc kubenswrapper[4783]: [-]has-synced failed: reason withheld Oct 02 10:55:21 crc kubenswrapper[4783]: [+]process-running ok Oct 02 10:55:21 crc kubenswrapper[4783]: healthz check failed Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.674633 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zwxct" podUID="7567048e-a0b0-46e5-b4bf-51180f84b884" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.762090 4783 patch_prober.go:28] interesting pod/apiserver-76f77b778f-dzxd5 container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Oct 02 10:55:21 crc kubenswrapper[4783]: [+]log ok Oct 02 10:55:21 crc kubenswrapper[4783]: [+]etcd ok Oct 02 10:55:21 crc kubenswrapper[4783]: [+]poststarthook/start-apiserver-admission-initializer ok Oct 02 10:55:21 crc kubenswrapper[4783]: [+]poststarthook/generic-apiserver-start-informers ok Oct 02 10:55:21 crc kubenswrapper[4783]: [+]poststarthook/max-in-flight-filter ok Oct 02 10:55:21 crc kubenswrapper[4783]: [+]poststarthook/storage-object-count-tracker-hook ok Oct 02 10:55:21 crc kubenswrapper[4783]: [+]poststarthook/image.openshift.io-apiserver-caches ok Oct 02 10:55:21 crc kubenswrapper[4783]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld Oct 02 10:55:21 crc kubenswrapper[4783]: [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: reason withheld Oct 02 10:55:21 crc kubenswrapper[4783]: [+]poststarthook/project.openshift.io-projectcache ok Oct 02 
10:55:21 crc kubenswrapper[4783]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok Oct 02 10:55:21 crc kubenswrapper[4783]: [+]poststarthook/openshift.io-startinformers ok Oct 02 10:55:21 crc kubenswrapper[4783]: [+]poststarthook/openshift.io-restmapperupdater ok Oct 02 10:55:21 crc kubenswrapper[4783]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Oct 02 10:55:21 crc kubenswrapper[4783]: livez check failed Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.762150 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-dzxd5" podUID="feedb992-610e-4ceb-84f1-7d5a005d7826" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.772820 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c97a1686-3bf0-4739-a538-a191023eff38-catalog-content\") pod \"redhat-marketplace-4ntdb\" (UID: \"c97a1686-3bf0-4739-a538-a191023eff38\") " pod="openshift-marketplace/redhat-marketplace-4ntdb" Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.772862 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d4gmk\" (UniqueName: \"kubernetes.io/projected/c97a1686-3bf0-4739-a538-a191023eff38-kube-api-access-d4gmk\") pod \"redhat-marketplace-4ntdb\" (UID: \"c97a1686-3bf0-4739-a538-a191023eff38\") " pod="openshift-marketplace/redhat-marketplace-4ntdb" Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.772903 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c97a1686-3bf0-4739-a538-a191023eff38-utilities\") pod \"redhat-marketplace-4ntdb\" (UID: \"c97a1686-3bf0-4739-a538-a191023eff38\") " pod="openshift-marketplace/redhat-marketplace-4ntdb" Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.772931 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:21 crc kubenswrapper[4783]: E1002 10:55:21.773173 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:22.273161466 +0000 UTC m=+155.589355727 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.847323 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-h7sxv"] Oct 02 10:55:21 crc kubenswrapper[4783]: W1002 10:55:21.864677 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddf7d4c4d_8ca4_4ecb_aae5_8cc41f64d850.slice/crio-1af64d36a11b2d762142e8a7d3c729809cbeb9a8bd0e69a015c1fd1372735d72 WatchSource:0}: Error finding container 1af64d36a11b2d762142e8a7d3c729809cbeb9a8bd0e69a015c1fd1372735d72: Status 404 returned error can't find the container with id 1af64d36a11b2d762142e8a7d3c729809cbeb9a8bd0e69a015c1fd1372735d72 Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.873934 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:55:21 crc kubenswrapper[4783]: E1002 10:55:21.874120 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:22.37409508 +0000 UTC m=+155.690289341 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.874220 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.874288 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c97a1686-3bf0-4739-a538-a191023eff38-catalog-content\") pod \"redhat-marketplace-4ntdb\" (UID: \"c97a1686-3bf0-4739-a538-a191023eff38\") " pod="openshift-marketplace/redhat-marketplace-4ntdb" Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.874321 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d4gmk\" (UniqueName: \"kubernetes.io/projected/c97a1686-3bf0-4739-a538-a191023eff38-kube-api-access-d4gmk\") pod \"redhat-marketplace-4ntdb\" (UID: \"c97a1686-3bf0-4739-a538-a191023eff38\") " pod="openshift-marketplace/redhat-marketplace-4ntdb" Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.874367 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c97a1686-3bf0-4739-a538-a191023eff38-utilities\") pod \"redhat-marketplace-4ntdb\" (UID: \"c97a1686-3bf0-4739-a538-a191023eff38\") " pod="openshift-marketplace/redhat-marketplace-4ntdb" Oct 02 10:55:21 crc kubenswrapper[4783]: E1002 10:55:21.874524 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:22.374515831 +0000 UTC m=+155.690710092 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.874662 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c97a1686-3bf0-4739-a538-a191023eff38-catalog-content\") pod \"redhat-marketplace-4ntdb\" (UID: \"c97a1686-3bf0-4739-a538-a191023eff38\") " pod="openshift-marketplace/redhat-marketplace-4ntdb" Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.874693 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c97a1686-3bf0-4739-a538-a191023eff38-utilities\") pod \"redhat-marketplace-4ntdb\" (UID: \"c97a1686-3bf0-4739-a538-a191023eff38\") " pod="openshift-marketplace/redhat-marketplace-4ntdb" Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.893119 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d4gmk\" (UniqueName: \"kubernetes.io/projected/c97a1686-3bf0-4739-a538-a191023eff38-kube-api-access-d4gmk\") pod \"redhat-marketplace-4ntdb\" (UID: \"c97a1686-3bf0-4739-a538-a191023eff38\") " pod="openshift-marketplace/redhat-marketplace-4ntdb" Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.942571 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4ntdb" Oct 02 10:55:21 crc kubenswrapper[4783]: I1002 10:55:21.975512 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:55:21 crc kubenswrapper[4783]: E1002 10:55:21.975867 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:22.475836224 +0000 UTC m=+155.792030485 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.078160 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:22 crc kubenswrapper[4783]: E1002 10:55:22.078628 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:22.578617366 +0000 UTC m=+155.894811628 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.179374 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:55:22 crc kubenswrapper[4783]: E1002 10:55:22.179544 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:22.67951957 +0000 UTC m=+155.995713831 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.179699 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:22 crc kubenswrapper[4783]: E1002 10:55:22.180107 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:22.680092525 +0000 UTC m=+155.996286796 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.207762 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-ncpxv"] Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.209022 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ncpxv" Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.210941 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.218890 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-ncpxv"] Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.281182 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:55:22 crc kubenswrapper[4783]: E1002 10:55:22.281328 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:22.781299296 +0000 UTC m=+156.097493567 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.281367 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:22 crc kubenswrapper[4783]: E1002 10:55:22.281684 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:22.781674106 +0000 UTC m=+156.097868377 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.314611 4783 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.323687 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-4ntdb"] Oct 02 10:55:22 crc kubenswrapper[4783]: W1002 10:55:22.331208 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc97a1686_3bf0_4739_a538_a191023eff38.slice/crio-591e1fb7381805018a78b794a4d8322b0e474091bb35ac29784ded1cf82d6f0b WatchSource:0}: Error finding container 591e1fb7381805018a78b794a4d8322b0e474091bb35ac29784ded1cf82d6f0b: Status 404 returned error can't find the container with id 591e1fb7381805018a78b794a4d8322b0e474091bb35ac29784ded1cf82d6f0b Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.384355 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:55:22 crc kubenswrapper[4783]: E1002 10:55:22.384537 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:22.88451256 +0000 UTC m=+156.200706821 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.384585 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cjshf\" (UniqueName: \"kubernetes.io/projected/797d3b59-9112-4efe-bd7d-34718545bc1c-kube-api-access-cjshf\") pod \"redhat-operators-ncpxv\" (UID: \"797d3b59-9112-4efe-bd7d-34718545bc1c\") " pod="openshift-marketplace/redhat-operators-ncpxv" Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.384746 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/797d3b59-9112-4efe-bd7d-34718545bc1c-catalog-content\") pod \"redhat-operators-ncpxv\" (UID: \"797d3b59-9112-4efe-bd7d-34718545bc1c\") " pod="openshift-marketplace/redhat-operators-ncpxv" Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.384995 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.385053 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/797d3b59-9112-4efe-bd7d-34718545bc1c-utilities\") pod \"redhat-operators-ncpxv\" (UID: \"797d3b59-9112-4efe-bd7d-34718545bc1c\") " pod="openshift-marketplace/redhat-operators-ncpxv" Oct 02 10:55:22 crc kubenswrapper[4783]: E1002 10:55:22.385507 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:22.885405063 +0000 UTC m=+156.201599334 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.436445 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cbwgv" event={"ID":"37f50271-72da-49ab-972e-ef5cb30020cc","Type":"ContainerStarted","Data":"18af1ce8df4a003cbf805491f175be3c4dea266e19c885583af6cd85ccfe4bc0"} Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.436490 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cbwgv" event={"ID":"37f50271-72da-49ab-972e-ef5cb30020cc","Type":"ContainerStarted","Data":"bc8fb8ba0a93baddba4dcfd867124464efca82b6b2b32dddca5e7d50ff0b1553"} Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.438129 4783 generic.go:334] "Generic (PLEG): container finished" podID="31f92601-c54c-4f96-88db-35fe1673fe40" containerID="f64f3cdb929ab7a818497f660dbeb543e8de41533b1fb5fddc66b9fcd2cd9c9b" exitCode=0 Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.438160 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zxxz4" event={"ID":"31f92601-c54c-4f96-88db-35fe1673fe40","Type":"ContainerDied","Data":"f64f3cdb929ab7a818497f660dbeb543e8de41533b1fb5fddc66b9fcd2cd9c9b"} Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.439722 4783 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.442321 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-9ppnb" event={"ID":"f2ba3d51-3014-4233-931b-adaa45c937dd","Type":"ContainerStarted","Data":"37cd03da89c617c20b54a09ecbc70aea4a54b2cb581e2ada577999bdb63a457c"} Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.444836 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4ntdb" event={"ID":"c97a1686-3bf0-4739-a538-a191023eff38","Type":"ContainerStarted","Data":"591e1fb7381805018a78b794a4d8322b0e474091bb35ac29784ded1cf82d6f0b"} Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.453464 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"42f92d75-c0d2-4935-a411-6c892019bde0","Type":"ContainerStarted","Data":"4b2723a4ea95b7a98a208c3dac633f8c9f5710f41972ac85f573b43f89a86c98"} Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.457292 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h7sxv" event={"ID":"df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850","Type":"ContainerStarted","Data":"1af64d36a11b2d762142e8a7d3c729809cbeb9a8bd0e69a015c1fd1372735d72"} Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.458541 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cs782" event={"ID":"4e629356-d5af-454b-8451-c651ccf13b32","Type":"ContainerStarted","Data":"86e53e1b538ef0bf6a97ec161e079f44ae16c31c0476eb3808e9c328a39b5548"} Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.458558 4783 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cs782" event={"ID":"4e629356-d5af-454b-8451-c651ccf13b32","Type":"ContainerStarted","Data":"e0aa1b93c904f50cb41610b881fe0382e37ff0a8d6e80caebd54fc7547dd22b2"} Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.460085 4783 generic.go:334] "Generic (PLEG): container finished" podID="b241b260-2dbd-4383-8a28-b8728e86a605" containerID="d251e1971762f9daf7d73081d159191af52cb4d4c0c3619be4963b16eef178f7" exitCode=0 Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.460258 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-97zw7" event={"ID":"b241b260-2dbd-4383-8a28-b8728e86a605","Type":"ContainerDied","Data":"d251e1971762f9daf7d73081d159191af52cb4d4c0c3619be4963b16eef178f7"} Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.486505 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:55:22 crc kubenswrapper[4783]: E1002 10:55:22.486675 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:22.986649335 +0000 UTC m=+156.302843596 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.486717 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.486738 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/797d3b59-9112-4efe-bd7d-34718545bc1c-utilities\") pod \"redhat-operators-ncpxv\" (UID: \"797d3b59-9112-4efe-bd7d-34718545bc1c\") " pod="openshift-marketplace/redhat-operators-ncpxv" Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.486762 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cjshf\" (UniqueName: \"kubernetes.io/projected/797d3b59-9112-4efe-bd7d-34718545bc1c-kube-api-access-cjshf\") pod \"redhat-operators-ncpxv\" (UID: \"797d3b59-9112-4efe-bd7d-34718545bc1c\") " pod="openshift-marketplace/redhat-operators-ncpxv" Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.486807 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/797d3b59-9112-4efe-bd7d-34718545bc1c-catalog-content\") pod 
\"redhat-operators-ncpxv\" (UID: \"797d3b59-9112-4efe-bd7d-34718545bc1c\") " pod="openshift-marketplace/redhat-operators-ncpxv" Oct 02 10:55:22 crc kubenswrapper[4783]: E1002 10:55:22.487082 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:22.987073606 +0000 UTC m=+156.303267867 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.487219 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/797d3b59-9112-4efe-bd7d-34718545bc1c-utilities\") pod \"redhat-operators-ncpxv\" (UID: \"797d3b59-9112-4efe-bd7d-34718545bc1c\") " pod="openshift-marketplace/redhat-operators-ncpxv" Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.487254 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/797d3b59-9112-4efe-bd7d-34718545bc1c-catalog-content\") pod \"redhat-operators-ncpxv\" (UID: \"797d3b59-9112-4efe-bd7d-34718545bc1c\") " pod="openshift-marketplace/redhat-operators-ncpxv" Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.588258 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:55:22 crc kubenswrapper[4783]: E1002 10:55:22.588474 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:23.088447802 +0000 UTC m=+156.404642073 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.589376 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:22 crc kubenswrapper[4783]: E1002 10:55:22.589783 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:23.089770977 +0000 UTC m=+156.405965238 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.614440 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-5gvbx"] Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.615706 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-5gvbx" Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.621097 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cjshf\" (UniqueName: \"kubernetes.io/projected/797d3b59-9112-4efe-bd7d-34718545bc1c-kube-api-access-cjshf\") pod \"redhat-operators-ncpxv\" (UID: \"797d3b59-9112-4efe-bd7d-34718545bc1c\") " pod="openshift-marketplace/redhat-operators-ncpxv" Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.624395 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5gvbx"] Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.666136 4783 patch_prober.go:28] interesting pod/router-default-5444994796-zwxct container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Oct 02 10:55:22 crc kubenswrapper[4783]: [-]has-synced failed: reason withheld Oct 02 10:55:22 crc kubenswrapper[4783]: [+]process-running ok Oct 02 10:55:22 crc kubenswrapper[4783]: healthz check failed Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.666338 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zwxct" podUID="7567048e-a0b0-46e5-b4bf-51180f84b884" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.690383 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:55:22 crc kubenswrapper[4783]: E1002 10:55:22.690491 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-10-02 10:55:23.190474315 +0000 UTC m=+156.506668576 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.690779 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:22 crc kubenswrapper[4783]: E1002 10:55:22.691087 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-10-02 10:55:23.191075351 +0000 UTC m=+156.507269622 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-shp99" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.742270 4783 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-10-02T10:55:22.314634242Z","Handler":null,"Name":""} Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.744710 4783 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.744740 4783 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.792267 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.792602 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a31888b-bb76-4ec4-91fe-745018b2e966-catalog-content\") pod \"redhat-operators-5gvbx\" (UID: \"9a31888b-bb76-4ec4-91fe-745018b2e966\") " pod="openshift-marketplace/redhat-operators-5gvbx" Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.792654 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-psw82\" (UniqueName: \"kubernetes.io/projected/9a31888b-bb76-4ec4-91fe-745018b2e966-kube-api-access-psw82\") pod \"redhat-operators-5gvbx\" (UID: \"9a31888b-bb76-4ec4-91fe-745018b2e966\") " pod="openshift-marketplace/redhat-operators-5gvbx" Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.792684 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a31888b-bb76-4ec4-91fe-745018b2e966-utilities\") pod \"redhat-operators-5gvbx\" (UID: \"9a31888b-bb76-4ec4-91fe-745018b2e966\") " pod="openshift-marketplace/redhat-operators-5gvbx" Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.797408 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.822707 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.823444 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.826096 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.828989 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.840198 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.894271 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a31888b-bb76-4ec4-91fe-745018b2e966-utilities\") pod \"redhat-operators-5gvbx\" (UID: \"9a31888b-bb76-4ec4-91fe-745018b2e966\") " pod="openshift-marketplace/redhat-operators-5gvbx" Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.894373 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.894447 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a31888b-bb76-4ec4-91fe-745018b2e966-catalog-content\") pod \"redhat-operators-5gvbx\" (UID: \"9a31888b-bb76-4ec4-91fe-745018b2e966\") " pod="openshift-marketplace/redhat-operators-5gvbx" Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.894489 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-psw82\" (UniqueName: \"kubernetes.io/projected/9a31888b-bb76-4ec4-91fe-745018b2e966-kube-api-access-psw82\") pod \"redhat-operators-5gvbx\" (UID: \"9a31888b-bb76-4ec4-91fe-745018b2e966\") " pod="openshift-marketplace/redhat-operators-5gvbx" Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.895126 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a31888b-bb76-4ec4-91fe-745018b2e966-utilities\") pod \"redhat-operators-5gvbx\" (UID: \"9a31888b-bb76-4ec4-91fe-745018b2e966\") " pod="openshift-marketplace/redhat-operators-5gvbx" Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.895180 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a31888b-bb76-4ec4-91fe-745018b2e966-catalog-content\") pod \"redhat-operators-5gvbx\" (UID: \"9a31888b-bb76-4ec4-91fe-745018b2e966\") " pod="openshift-marketplace/redhat-operators-5gvbx" Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.906567 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-ncpxv" Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.912293 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-psw82\" (UniqueName: \"kubernetes.io/projected/9a31888b-bb76-4ec4-91fe-745018b2e966-kube-api-access-psw82\") pod \"redhat-operators-5gvbx\" (UID: \"9a31888b-bb76-4ec4-91fe-745018b2e966\") " pod="openshift-marketplace/redhat-operators-5gvbx" Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.938581 4783 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.938629 4783 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:22 crc kubenswrapper[4783]: I1002 10:55:22.961868 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5gvbx" Oct 02 10:55:23 crc kubenswrapper[4783]: I1002 10:55:23.004211 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/17062cd9-24b6-489a-8474-c5e43feb3819-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"17062cd9-24b6-489a-8474-c5e43feb3819\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Oct 02 10:55:23 crc kubenswrapper[4783]: I1002 10:55:23.004301 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/17062cd9-24b6-489a-8474-c5e43feb3819-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"17062cd9-24b6-489a-8474-c5e43feb3819\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Oct 02 10:55:23 crc kubenswrapper[4783]: I1002 10:55:23.017496 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-shp99\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:23 crc kubenswrapper[4783]: I1002 10:55:23.105588 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/17062cd9-24b6-489a-8474-c5e43feb3819-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"17062cd9-24b6-489a-8474-c5e43feb3819\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Oct 02 10:55:23 crc kubenswrapper[4783]: I1002 10:55:23.105662 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/17062cd9-24b6-489a-8474-c5e43feb3819-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"17062cd9-24b6-489a-8474-c5e43feb3819\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Oct 02 10:55:23 crc kubenswrapper[4783]: 
I1002 10:55:23.105957 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/17062cd9-24b6-489a-8474-c5e43feb3819-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"17062cd9-24b6-489a-8474-c5e43feb3819\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Oct 02 10:55:23 crc kubenswrapper[4783]: I1002 10:55:23.124234 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/17062cd9-24b6-489a-8474-c5e43feb3819-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"17062cd9-24b6-489a-8474-c5e43feb3819\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Oct 02 10:55:23 crc kubenswrapper[4783]: I1002 10:55:23.136724 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Oct 02 10:55:23 crc kubenswrapper[4783]: I1002 10:55:23.148858 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-ncpxv"] Oct 02 10:55:23 crc kubenswrapper[4783]: W1002 10:55:23.156237 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod797d3b59_9112_4efe_bd7d_34718545bc1c.slice/crio-32c0d35537aea3c056c16c0c9f872ac53f0c41987ee6a687a1c9cbe929ea2c31 WatchSource:0}: Error finding container 32c0d35537aea3c056c16c0c9f872ac53f0c41987ee6a687a1c9cbe929ea2c31: Status 404 returned error can't find the container with id 32c0d35537aea3c056c16c0c9f872ac53f0c41987ee6a687a1c9cbe929ea2c31 Oct 02 10:55:23 crc kubenswrapper[4783]: I1002 10:55:23.231247 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:23 crc kubenswrapper[4783]: I1002 10:55:23.290877 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5gvbx"] Oct 02 10:55:23 crc kubenswrapper[4783]: I1002 10:55:23.473079 4783 generic.go:334] "Generic (PLEG): container finished" podID="4e629356-d5af-454b-8451-c651ccf13b32" containerID="86e53e1b538ef0bf6a97ec161e079f44ae16c31c0476eb3808e9c328a39b5548" exitCode=0 Oct 02 10:55:23 crc kubenswrapper[4783]: I1002 10:55:23.473428 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cs782" event={"ID":"4e629356-d5af-454b-8451-c651ccf13b32","Type":"ContainerDied","Data":"86e53e1b538ef0bf6a97ec161e079f44ae16c31c0476eb3808e9c328a39b5548"} Oct 02 10:55:23 crc kubenswrapper[4783]: I1002 10:55:23.479155 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ncpxv" event={"ID":"797d3b59-9112-4efe-bd7d-34718545bc1c","Type":"ContainerStarted","Data":"32c0d35537aea3c056c16c0c9f872ac53f0c41987ee6a687a1c9cbe929ea2c31"} Oct 02 10:55:23 crc kubenswrapper[4783]: I1002 10:55:23.479325 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-shp99"] Oct 02 10:55:23 crc kubenswrapper[4783]: I1002 10:55:23.485760 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5gvbx" event={"ID":"9a31888b-bb76-4ec4-91fe-745018b2e966","Type":"ContainerStarted","Data":"043dacd7ad448c1839d8f41844f65e2272afb763818d0c5229262fb13436aa3c"} Oct 02 10:55:23 crc kubenswrapper[4783]: I1002 10:55:23.492292 4783 generic.go:334] "Generic (PLEG): container finished" 
podID="37f50271-72da-49ab-972e-ef5cb30020cc" containerID="18af1ce8df4a003cbf805491f175be3c4dea266e19c885583af6cd85ccfe4bc0" exitCode=0 Oct 02 10:55:23 crc kubenswrapper[4783]: I1002 10:55:23.492348 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cbwgv" event={"ID":"37f50271-72da-49ab-972e-ef5cb30020cc","Type":"ContainerDied","Data":"18af1ce8df4a003cbf805491f175be3c4dea266e19c885583af6cd85ccfe4bc0"} Oct 02 10:55:23 crc kubenswrapper[4783]: I1002 10:55:23.508624 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-9ppnb" event={"ID":"f2ba3d51-3014-4233-931b-adaa45c937dd","Type":"ContainerStarted","Data":"b3b48ccf9ecbce288ca8d1033b23682d54c4ad9390f7c56140b13453332d5481"} Oct 02 10:55:23 crc kubenswrapper[4783]: I1002 10:55:23.511758 4783 generic.go:334] "Generic (PLEG): container finished" podID="c97a1686-3bf0-4739-a538-a191023eff38" containerID="a8b472fcd0c864d83710d378367277c9c2f61326379a369993cdf88828fdd329" exitCode=0 Oct 02 10:55:23 crc kubenswrapper[4783]: I1002 10:55:23.511838 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4ntdb" event={"ID":"c97a1686-3bf0-4739-a538-a191023eff38","Type":"ContainerDied","Data":"a8b472fcd0c864d83710d378367277c9c2f61326379a369993cdf88828fdd329"} Oct 02 10:55:23 crc kubenswrapper[4783]: I1002 10:55:23.513568 4783 generic.go:334] "Generic (PLEG): container finished" podID="42f92d75-c0d2-4935-a411-6c892019bde0" containerID="4b2723a4ea95b7a98a208c3dac633f8c9f5710f41972ac85f573b43f89a86c98" exitCode=0 Oct 02 10:55:23 crc kubenswrapper[4783]: I1002 10:55:23.513628 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"42f92d75-c0d2-4935-a411-6c892019bde0","Type":"ContainerDied","Data":"4b2723a4ea95b7a98a208c3dac633f8c9f5710f41972ac85f573b43f89a86c98"} Oct 02 10:55:23 crc kubenswrapper[4783]: I1002 10:55:23.515767 4783 generic.go:334] "Generic (PLEG): container finished" podID="df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850" containerID="28d6fd42325bd8a75aa51b45086a3bfe9a4e0bc5181cb2946950516b1cb787a8" exitCode=0 Oct 02 10:55:23 crc kubenswrapper[4783]: I1002 10:55:23.515792 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h7sxv" event={"ID":"df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850","Type":"ContainerDied","Data":"28d6fd42325bd8a75aa51b45086a3bfe9a4e0bc5181cb2946950516b1cb787a8"} Oct 02 10:55:23 crc kubenswrapper[4783]: I1002 10:55:23.573764 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Oct 02 10:55:23 crc kubenswrapper[4783]: I1002 10:55:23.576259 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-9ppnb" podStartSLOduration=16.576243185 podStartE2EDuration="16.576243185s" podCreationTimestamp="2025-10-02 10:55:07 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:55:23.575182017 +0000 UTC m=+156.891376288" watchObservedRunningTime="2025-10-02 10:55:23.576243185 +0000 UTC m=+156.892437446" Oct 02 10:55:23 crc kubenswrapper[4783]: I1002 10:55:23.623653 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Oct 02 10:55:23 crc kubenswrapper[4783]: 
W1002 10:55:23.631870 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod17062cd9_24b6_489a_8474_c5e43feb3819.slice/crio-6412b4d31352ff99f750d8170fa43a842945c6fad23cc9742b485d820ea4b2df WatchSource:0}: Error finding container 6412b4d31352ff99f750d8170fa43a842945c6fad23cc9742b485d820ea4b2df: Status 404 returned error can't find the container with id 6412b4d31352ff99f750d8170fa43a842945c6fad23cc9742b485d820ea4b2df Oct 02 10:55:23 crc kubenswrapper[4783]: I1002 10:55:23.669185 4783 patch_prober.go:28] interesting pod/router-default-5444994796-zwxct container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Oct 02 10:55:23 crc kubenswrapper[4783]: [-]has-synced failed: reason withheld Oct 02 10:55:23 crc kubenswrapper[4783]: [+]process-running ok Oct 02 10:55:23 crc kubenswrapper[4783]: healthz check failed Oct 02 10:55:23 crc kubenswrapper[4783]: I1002 10:55:23.669231 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zwxct" podUID="7567048e-a0b0-46e5-b4bf-51180f84b884" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 02 10:55:24 crc kubenswrapper[4783]: I1002 10:55:24.521723 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-shp99" event={"ID":"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe","Type":"ContainerStarted","Data":"fffef357456d8d69aabd735886127d301349d352c75dc8bbf29d36f40216d2c0"} Oct 02 10:55:24 crc kubenswrapper[4783]: I1002 10:55:24.524368 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"17062cd9-24b6-489a-8474-c5e43feb3819","Type":"ContainerStarted","Data":"ca38c3782efe5131c8be61984a50a7c84e08563c1b865946e230262845735508"} Oct 02 10:55:24 crc kubenswrapper[4783]: I1002 10:55:24.524440 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"17062cd9-24b6-489a-8474-c5e43feb3819","Type":"ContainerStarted","Data":"6412b4d31352ff99f750d8170fa43a842945c6fad23cc9742b485d820ea4b2df"} Oct 02 10:55:24 crc kubenswrapper[4783]: I1002 10:55:24.525920 4783 generic.go:334] "Generic (PLEG): container finished" podID="797d3b59-9112-4efe-bd7d-34718545bc1c" containerID="ccb61612401879186d0f5f7275079919f33985e349e1057fe71c9bb368fbbbf9" exitCode=0 Oct 02 10:55:24 crc kubenswrapper[4783]: I1002 10:55:24.526020 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ncpxv" event={"ID":"797d3b59-9112-4efe-bd7d-34718545bc1c","Type":"ContainerDied","Data":"ccb61612401879186d0f5f7275079919f33985e349e1057fe71c9bb368fbbbf9"} Oct 02 10:55:24 crc kubenswrapper[4783]: I1002 10:55:24.668771 4783 patch_prober.go:28] interesting pod/router-default-5444994796-zwxct container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Oct 02 10:55:24 crc kubenswrapper[4783]: [-]has-synced failed: reason withheld Oct 02 10:55:24 crc kubenswrapper[4783]: [+]process-running ok Oct 02 10:55:24 crc kubenswrapper[4783]: healthz check failed Oct 02 10:55:24 crc kubenswrapper[4783]: I1002 10:55:24.668825 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zwxct" 
podUID="7567048e-a0b0-46e5-b4bf-51180f84b884" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 02 10:55:24 crc kubenswrapper[4783]: I1002 10:55:24.795391 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Oct 02 10:55:24 crc kubenswrapper[4783]: I1002 10:55:24.931612 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/42f92d75-c0d2-4935-a411-6c892019bde0-kube-api-access\") pod \"42f92d75-c0d2-4935-a411-6c892019bde0\" (UID: \"42f92d75-c0d2-4935-a411-6c892019bde0\") " Oct 02 10:55:24 crc kubenswrapper[4783]: I1002 10:55:24.931756 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/42f92d75-c0d2-4935-a411-6c892019bde0-kubelet-dir\") pod \"42f92d75-c0d2-4935-a411-6c892019bde0\" (UID: \"42f92d75-c0d2-4935-a411-6c892019bde0\") " Oct 02 10:55:24 crc kubenswrapper[4783]: I1002 10:55:24.931841 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/42f92d75-c0d2-4935-a411-6c892019bde0-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "42f92d75-c0d2-4935-a411-6c892019bde0" (UID: "42f92d75-c0d2-4935-a411-6c892019bde0"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 02 10:55:24 crc kubenswrapper[4783]: I1002 10:55:24.932058 4783 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/42f92d75-c0d2-4935-a411-6c892019bde0-kubelet-dir\") on node \"crc\" DevicePath \"\"" Oct 02 10:55:24 crc kubenswrapper[4783]: I1002 10:55:24.940069 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/42f92d75-c0d2-4935-a411-6c892019bde0-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "42f92d75-c0d2-4935-a411-6c892019bde0" (UID: "42f92d75-c0d2-4935-a411-6c892019bde0"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:55:25 crc kubenswrapper[4783]: I1002 10:55:25.032736 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/42f92d75-c0d2-4935-a411-6c892019bde0-kube-api-access\") on node \"crc\" DevicePath \"\"" Oct 02 10:55:25 crc kubenswrapper[4783]: I1002 10:55:25.422502 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-dzxd5" Oct 02 10:55:25 crc kubenswrapper[4783]: I1002 10:55:25.428847 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-dzxd5" Oct 02 10:55:25 crc kubenswrapper[4783]: I1002 10:55:25.530948 4783 generic.go:334] "Generic (PLEG): container finished" podID="9a31888b-bb76-4ec4-91fe-745018b2e966" containerID="4d4e709fb0cdb005f439f2d5fcd17fdcf26b89fcf3bf1bc071baba38cf1bba9f" exitCode=0 Oct 02 10:55:25 crc kubenswrapper[4783]: I1002 10:55:25.531011 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5gvbx" event={"ID":"9a31888b-bb76-4ec4-91fe-745018b2e966","Type":"ContainerDied","Data":"4d4e709fb0cdb005f439f2d5fcd17fdcf26b89fcf3bf1bc071baba38cf1bba9f"} Oct 02 10:55:25 crc kubenswrapper[4783]: I1002 10:55:25.536678 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-shp99" event={"ID":"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe","Type":"ContainerStarted","Data":"d2c480d4719ee27e7069e85cc4b930bcf12723db22e040f3a6688c13878632d9"} Oct 02 10:55:25 crc kubenswrapper[4783]: I1002 10:55:25.542960 4783 generic.go:334] "Generic (PLEG): container finished" podID="17062cd9-24b6-489a-8474-c5e43feb3819" containerID="ca38c3782efe5131c8be61984a50a7c84e08563c1b865946e230262845735508" exitCode=0 Oct 02 10:55:25 crc kubenswrapper[4783]: I1002 10:55:25.543012 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"17062cd9-24b6-489a-8474-c5e43feb3819","Type":"ContainerDied","Data":"ca38c3782efe5131c8be61984a50a7c84e08563c1b865946e230262845735508"} Oct 02 10:55:25 crc kubenswrapper[4783]: I1002 10:55:25.545900 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Oct 02 10:55:25 crc kubenswrapper[4783]: I1002 10:55:25.551798 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"42f92d75-c0d2-4935-a411-6c892019bde0","Type":"ContainerDied","Data":"f4fbcff6fc8aeadab8d0e0e19fca659a95af1bccdfb80af000f44aa31d55761e"} Oct 02 10:55:25 crc kubenswrapper[4783]: I1002 10:55:25.551834 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f4fbcff6fc8aeadab8d0e0e19fca659a95af1bccdfb80af000f44aa31d55761e" Oct 02 10:55:25 crc kubenswrapper[4783]: I1002 10:55:25.669651 4783 patch_prober.go:28] interesting pod/router-default-5444994796-zwxct container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Oct 02 10:55:25 crc kubenswrapper[4783]: [-]has-synced failed: reason withheld Oct 02 10:55:25 crc kubenswrapper[4783]: [+]process-running ok Oct 02 10:55:25 crc kubenswrapper[4783]: healthz check failed Oct 02 10:55:25 crc kubenswrapper[4783]: I1002 10:55:25.669711 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zwxct" podUID="7567048e-a0b0-46e5-b4bf-51180f84b884" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 02 10:55:25 crc kubenswrapper[4783]: I1002 10:55:25.853163 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-8vjpx" Oct 02 10:55:26 crc kubenswrapper[4783]: I1002 10:55:26.570114 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-shp99" podStartSLOduration=136.570100065 podStartE2EDuration="2m16.570100065s" podCreationTimestamp="2025-10-02 10:53:10 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:55:26.569648333 +0000 UTC m=+159.885842594" watchObservedRunningTime="2025-10-02 10:55:26.570100065 +0000 UTC m=+159.886294326" Oct 02 10:55:26 crc kubenswrapper[4783]: I1002 10:55:26.669222 4783 patch_prober.go:28] interesting pod/router-default-5444994796-zwxct container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Oct 02 10:55:26 crc kubenswrapper[4783]: [-]has-synced failed: reason withheld Oct 02 10:55:26 crc kubenswrapper[4783]: [+]process-running ok Oct 02 10:55:26 crc kubenswrapper[4783]: healthz check failed Oct 02 10:55:26 crc kubenswrapper[4783]: I1002 10:55:26.669285 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zwxct" podUID="7567048e-a0b0-46e5-b4bf-51180f84b884" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 02 10:55:26 crc kubenswrapper[4783]: I1002 10:55:26.789168 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Oct 02 10:55:26 crc kubenswrapper[4783]: I1002 10:55:26.964200 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/17062cd9-24b6-489a-8474-c5e43feb3819-kube-api-access\") pod \"17062cd9-24b6-489a-8474-c5e43feb3819\" (UID: \"17062cd9-24b6-489a-8474-c5e43feb3819\") " Oct 02 10:55:26 crc kubenswrapper[4783]: I1002 10:55:26.964369 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/17062cd9-24b6-489a-8474-c5e43feb3819-kubelet-dir\") pod \"17062cd9-24b6-489a-8474-c5e43feb3819\" (UID: \"17062cd9-24b6-489a-8474-c5e43feb3819\") " Oct 02 10:55:26 crc kubenswrapper[4783]: I1002 10:55:26.964652 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/17062cd9-24b6-489a-8474-c5e43feb3819-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "17062cd9-24b6-489a-8474-c5e43feb3819" (UID: "17062cd9-24b6-489a-8474-c5e43feb3819"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 02 10:55:26 crc kubenswrapper[4783]: I1002 10:55:26.970791 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/17062cd9-24b6-489a-8474-c5e43feb3819-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "17062cd9-24b6-489a-8474-c5e43feb3819" (UID: "17062cd9-24b6-489a-8474-c5e43feb3819"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:55:27 crc kubenswrapper[4783]: I1002 10:55:27.067063 4783 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/17062cd9-24b6-489a-8474-c5e43feb3819-kubelet-dir\") on node \"crc\" DevicePath \"\"" Oct 02 10:55:27 crc kubenswrapper[4783]: I1002 10:55:27.067146 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/17062cd9-24b6-489a-8474-c5e43feb3819-kube-api-access\") on node \"crc\" DevicePath \"\"" Oct 02 10:55:27 crc kubenswrapper[4783]: I1002 10:55:27.557043 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"17062cd9-24b6-489a-8474-c5e43feb3819","Type":"ContainerDied","Data":"6412b4d31352ff99f750d8170fa43a842945c6fad23cc9742b485d820ea4b2df"} Oct 02 10:55:27 crc kubenswrapper[4783]: I1002 10:55:27.557083 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6412b4d31352ff99f750d8170fa43a842945c6fad23cc9742b485d820ea4b2df" Oct 02 10:55:27 crc kubenswrapper[4783]: I1002 10:55:27.557091 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Oct 02 10:55:27 crc kubenswrapper[4783]: I1002 10:55:27.671213 4783 patch_prober.go:28] interesting pod/router-default-5444994796-zwxct container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Oct 02 10:55:27 crc kubenswrapper[4783]: [-]has-synced failed: reason withheld Oct 02 10:55:27 crc kubenswrapper[4783]: [+]process-running ok Oct 02 10:55:27 crc kubenswrapper[4783]: healthz check failed Oct 02 10:55:27 crc kubenswrapper[4783]: I1002 10:55:27.671284 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zwxct" podUID="7567048e-a0b0-46e5-b4bf-51180f84b884" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 02 10:55:28 crc kubenswrapper[4783]: I1002 10:55:28.665067 4783 patch_prober.go:28] interesting pod/router-default-5444994796-zwxct container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Oct 02 10:55:28 crc kubenswrapper[4783]: [-]has-synced failed: reason withheld Oct 02 10:55:28 crc kubenswrapper[4783]: [+]process-running ok Oct 02 10:55:28 crc kubenswrapper[4783]: healthz check failed Oct 02 10:55:28 crc kubenswrapper[4783]: I1002 10:55:28.665130 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zwxct" podUID="7567048e-a0b0-46e5-b4bf-51180f84b884" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 02 10:55:29 crc kubenswrapper[4783]: I1002 10:55:29.679838 4783 patch_prober.go:28] interesting pod/router-default-5444994796-zwxct container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Oct 02 10:55:29 crc kubenswrapper[4783]: [-]has-synced failed: reason withheld Oct 02 10:55:29 crc kubenswrapper[4783]: [+]process-running ok Oct 02 10:55:29 crc kubenswrapper[4783]: healthz check failed Oct 02 10:55:29 crc kubenswrapper[4783]: I1002 10:55:29.680107 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zwxct" podUID="7567048e-a0b0-46e5-b4bf-51180f84b884" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 02 10:55:30 crc kubenswrapper[4783]: I1002 10:55:30.205574 4783 patch_prober.go:28] interesting pod/console-f9d7485db-vjcp4 container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.32:8443/health\": dial tcp 10.217.0.32:8443: connect: connection refused" start-of-body= Oct 02 10:55:30 crc kubenswrapper[4783]: I1002 10:55:30.205617 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-vjcp4" podUID="ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7" containerName="console" probeResult="failure" output="Get \"https://10.217.0.32:8443/health\": dial tcp 10.217.0.32:8443: connect: connection refused" Oct 02 10:55:30 crc kubenswrapper[4783]: I1002 10:55:30.283487 4783 patch_prober.go:28] interesting pod/downloads-7954f5f757-r6pqv container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: 
connection refused" start-of-body= Oct 02 10:55:30 crc kubenswrapper[4783]: I1002 10:55:30.284112 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-r6pqv" podUID="71af7827-a6d5-4a87-9839-996ab528213d" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Oct 02 10:55:30 crc kubenswrapper[4783]: I1002 10:55:30.283746 4783 patch_prober.go:28] interesting pod/downloads-7954f5f757-r6pqv container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Oct 02 10:55:30 crc kubenswrapper[4783]: I1002 10:55:30.284206 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-r6pqv" podUID="71af7827-a6d5-4a87-9839-996ab528213d" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Oct 02 10:55:30 crc kubenswrapper[4783]: I1002 10:55:30.667123 4783 patch_prober.go:28] interesting pod/router-default-5444994796-zwxct container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Oct 02 10:55:30 crc kubenswrapper[4783]: [-]has-synced failed: reason withheld Oct 02 10:55:30 crc kubenswrapper[4783]: [+]process-running ok Oct 02 10:55:30 crc kubenswrapper[4783]: healthz check failed Oct 02 10:55:30 crc kubenswrapper[4783]: I1002 10:55:30.667189 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-zwxct" podUID="7567048e-a0b0-46e5-b4bf-51180f84b884" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 02 10:55:31 crc kubenswrapper[4783]: I1002 10:55:31.665902 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-zwxct" Oct 02 10:55:31 crc kubenswrapper[4783]: I1002 10:55:31.668516 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-zwxct" Oct 02 10:55:32 crc kubenswrapper[4783]: I1002 10:55:32.442839 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f05f5bf0-b0a7-453b-999b-8ef23ca6cc68-metrics-certs\") pod \"network-metrics-daemon-6qbg4\" (UID: \"f05f5bf0-b0a7-453b-999b-8ef23ca6cc68\") " pod="openshift-multus/network-metrics-daemon-6qbg4" Oct 02 10:55:32 crc kubenswrapper[4783]: I1002 10:55:32.449299 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/f05f5bf0-b0a7-453b-999b-8ef23ca6cc68-metrics-certs\") pod \"network-metrics-daemon-6qbg4\" (UID: \"f05f5bf0-b0a7-453b-999b-8ef23ca6cc68\") " pod="openshift-multus/network-metrics-daemon-6qbg4" Oct 02 10:55:32 crc kubenswrapper[4783]: I1002 10:55:32.668708 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-6qbg4" Oct 02 10:55:33 crc kubenswrapper[4783]: I1002 10:55:33.232597 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:40 crc kubenswrapper[4783]: I1002 10:55:40.209103 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-vjcp4" Oct 02 10:55:40 crc kubenswrapper[4783]: I1002 10:55:40.213940 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-vjcp4" Oct 02 10:55:40 crc kubenswrapper[4783]: I1002 10:55:40.283219 4783 patch_prober.go:28] interesting pod/downloads-7954f5f757-r6pqv container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Oct 02 10:55:40 crc kubenswrapper[4783]: I1002 10:55:40.283280 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-r6pqv" podUID="71af7827-a6d5-4a87-9839-996ab528213d" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Oct 02 10:55:40 crc kubenswrapper[4783]: I1002 10:55:40.283335 4783 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console/downloads-7954f5f757-r6pqv" Oct 02 10:55:40 crc kubenswrapper[4783]: I1002 10:55:40.283632 4783 patch_prober.go:28] interesting pod/downloads-7954f5f757-r6pqv container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Oct 02 10:55:40 crc kubenswrapper[4783]: I1002 10:55:40.283681 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-r6pqv" podUID="71af7827-a6d5-4a87-9839-996ab528213d" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Oct 02 10:55:40 crc kubenswrapper[4783]: I1002 10:55:40.283976 4783 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="download-server" containerStatusID={"Type":"cri-o","ID":"616dca96f06b63f9999f042cfd4448105312305129b42d27fe7553fbdad269c2"} pod="openshift-console/downloads-7954f5f757-r6pqv" containerMessage="Container download-server failed liveness probe, will be restarted" Oct 02 10:55:40 crc kubenswrapper[4783]: I1002 10:55:40.284069 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/downloads-7954f5f757-r6pqv" podUID="71af7827-a6d5-4a87-9839-996ab528213d" containerName="download-server" containerID="cri-o://616dca96f06b63f9999f042cfd4448105312305129b42d27fe7553fbdad269c2" gracePeriod=2 Oct 02 10:55:40 crc kubenswrapper[4783]: I1002 10:55:40.284231 4783 patch_prober.go:28] interesting pod/downloads-7954f5f757-r6pqv container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Oct 02 10:55:40 crc kubenswrapper[4783]: I1002 10:55:40.284281 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-r6pqv" podUID="71af7827-a6d5-4a87-9839-996ab528213d" 
containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Oct 02 10:55:40 crc kubenswrapper[4783]: I1002 10:55:40.641962 4783 generic.go:334] "Generic (PLEG): container finished" podID="71af7827-a6d5-4a87-9839-996ab528213d" containerID="616dca96f06b63f9999f042cfd4448105312305129b42d27fe7553fbdad269c2" exitCode=0 Oct 02 10:55:40 crc kubenswrapper[4783]: I1002 10:55:40.642059 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-r6pqv" event={"ID":"71af7827-a6d5-4a87-9839-996ab528213d","Type":"ContainerDied","Data":"616dca96f06b63f9999f042cfd4448105312305129b42d27fe7553fbdad269c2"} Oct 02 10:55:43 crc kubenswrapper[4783]: I1002 10:55:43.237372 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 10:55:50 crc kubenswrapper[4783]: I1002 10:55:50.283894 4783 patch_prober.go:28] interesting pod/downloads-7954f5f757-r6pqv container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Oct 02 10:55:50 crc kubenswrapper[4783]: I1002 10:55:50.284523 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-r6pqv" podUID="71af7827-a6d5-4a87-9839-996ab528213d" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Oct 02 10:55:51 crc kubenswrapper[4783]: I1002 10:55:51.072621 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-rfpcx" Oct 02 10:55:51 crc kubenswrapper[4783]: I1002 10:55:51.514211 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 02 10:55:51 crc kubenswrapper[4783]: I1002 10:55:51.514302 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 02 10:55:56 crc kubenswrapper[4783]: I1002 10:55:56.717808 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Oct 02 10:56:00 crc kubenswrapper[4783]: I1002 10:56:00.284148 4783 patch_prober.go:28] interesting pod/downloads-7954f5f757-r6pqv container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Oct 02 10:56:00 crc kubenswrapper[4783]: I1002 10:56:00.284660 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-r6pqv" podUID="71af7827-a6d5-4a87-9839-996ab528213d" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Oct 02 10:56:10 crc kubenswrapper[4783]: I1002 10:56:10.283340 
4783 patch_prober.go:28] interesting pod/downloads-7954f5f757-r6pqv container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Oct 02 10:56:10 crc kubenswrapper[4783]: I1002 10:56:10.284192 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-r6pqv" podUID="71af7827-a6d5-4a87-9839-996ab528213d" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Oct 02 10:56:20 crc kubenswrapper[4783]: I1002 10:56:20.283006 4783 patch_prober.go:28] interesting pod/downloads-7954f5f757-r6pqv container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Oct 02 10:56:20 crc kubenswrapper[4783]: I1002 10:56:20.283647 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-r6pqv" podUID="71af7827-a6d5-4a87-9839-996ab528213d" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Oct 02 10:56:21 crc kubenswrapper[4783]: I1002 10:56:21.513994 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 02 10:56:21 crc kubenswrapper[4783]: I1002 10:56:21.514049 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 02 10:56:21 crc kubenswrapper[4783]: I1002 10:56:21.514098 4783 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" Oct 02 10:56:21 crc kubenswrapper[4783]: I1002 10:56:21.514698 4783 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"65d5af548f6048ec95699a1b7175ec8efea96441c335805d704b414eedac00d5"} pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 02 10:56:21 crc kubenswrapper[4783]: I1002 10:56:21.514763 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" containerID="cri-o://65d5af548f6048ec95699a1b7175ec8efea96441c335805d704b414eedac00d5" gracePeriod=600 Oct 02 10:56:24 crc kubenswrapper[4783]: I1002 10:56:24.916149 4783 generic.go:334] "Generic (PLEG): container finished" podID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerID="65d5af548f6048ec95699a1b7175ec8efea96441c335805d704b414eedac00d5" exitCode=0 Oct 02 10:56:24 crc kubenswrapper[4783]: I1002 10:56:24.916185 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
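[Editor's note: every liveness failure above is a plain TCP "connection refused", meaning nothing was listening on the probed port yet; once the failure threshold is crossed, the kubelet kills the container with the pod's termination grace period (gracePeriod=2 for download-server, gracePeriod=600 for machine-config-daemon) and restarts it. A minimal sketch of the kind of health listener these probes expect; the address and path are assumptions for illustration, not taken from the pods above.]

```go
package main

import (
	"log"
	"net/http"
)

func main() {
	// Until this process binds the port, an HTTP probe against it fails with
	// "connect: connection refused", exactly as logged for 10.217.0.12:8080
	// and 127.0.0.1:8798. Once the listener is up, probes get 200 and the
	// kubelet reports the container healthy again.
	http.HandleFunc("/health", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
		_, _ = w.Write([]byte("ok"))
	})
	log.Fatal(http.ListenAndServe("127.0.0.1:8798", nil)) // assumed bind address
}
```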
pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" event={"ID":"3288cc82-59a8-408e-8b0e-b5255882b4fb","Type":"ContainerDied","Data":"65d5af548f6048ec95699a1b7175ec8efea96441c335805d704b414eedac00d5"} Oct 02 10:56:25 crc kubenswrapper[4783]: E1002 10:56:25.631944 4783 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: reading blob sha256:fc6d1468707e4bcc767e25ba90e295828fee37cd04f9ceaa879288e8fb4d2d84: Get \"https://registry.redhat.io/v2/redhat/redhat-operator-index/blobs/sha256:fc6d1468707e4bcc767e25ba90e295828fee37cd04f9ceaa879288e8fb4d2d84\": context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Oct 02 10:56:25 crc kubenswrapper[4783]: E1002 10:56:25.632583 4783 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-psw82,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-5gvbx_openshift-marketplace(9a31888b-bb76-4ec4-91fe-745018b2e966): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: reading blob sha256:fc6d1468707e4bcc767e25ba90e295828fee37cd04f9ceaa879288e8fb4d2d84: Get \"https://registry.redhat.io/v2/redhat/redhat-operator-index/blobs/sha256:fc6d1468707e4bcc767e25ba90e295828fee37cd04f9ceaa879288e8fb4d2d84\": context canceled" logger="UnhandledError" Oct 02 10:56:25 crc kubenswrapper[4783]: E1002 10:56:25.633980 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: reading blob sha256:fc6d1468707e4bcc767e25ba90e295828fee37cd04f9ceaa879288e8fb4d2d84: Get \\\"https://registry.redhat.io/v2/redhat/redhat-operator-index/blobs/sha256:fc6d1468707e4bcc767e25ba90e295828fee37cd04f9ceaa879288e8fb4d2d84\\\": context canceled\"" pod="openshift-marketplace/redhat-operators-5gvbx" podUID="9a31888b-bb76-4ec4-91fe-745018b2e966" Oct 02 10:56:28 crc kubenswrapper[4783]: E1002 10:56:28.663578 4783 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-5gvbx" podUID="9a31888b-bb76-4ec4-91fe-745018b2e966" Oct 02 10:56:28 crc kubenswrapper[4783]: E1002 10:56:28.790935 4783 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Oct 02 10:56:28 crc kubenswrapper[4783]: E1002 10:56:28.791105 4783 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ljrl2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-cbwgv_openshift-marketplace(37f50271-72da-49ab-972e-ef5cb30020cc): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Oct 02 10:56:28 crc kubenswrapper[4783]: E1002 10:56:28.792299 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-cbwgv" podUID="37f50271-72da-49ab-972e-ef5cb30020cc" Oct 02 10:56:28 crc kubenswrapper[4783]: E1002 10:56:28.809398 4783 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Oct 02 10:56:28 crc kubenswrapper[4783]: E1002 10:56:28.809911 4783 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs 
--catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-fds57,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-cs782_openshift-marketplace(4e629356-d5af-454b-8451-c651ccf13b32): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Oct 02 10:56:28 crc kubenswrapper[4783]: E1002 10:56:28.811072 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-cs782" podUID="4e629356-d5af-454b-8451-c651ccf13b32" Oct 02 10:56:30 crc kubenswrapper[4783]: I1002 10:56:30.283601 4783 patch_prober.go:28] interesting pod/downloads-7954f5f757-r6pqv container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Oct 02 10:56:30 crc kubenswrapper[4783]: I1002 10:56:30.283655 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-r6pqv" podUID="71af7827-a6d5-4a87-9839-996ab528213d" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Oct 02 10:56:31 crc kubenswrapper[4783]: E1002 10:56:31.611680 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-cbwgv" podUID="37f50271-72da-49ab-972e-ef5cb30020cc" Oct 02 10:56:31 crc kubenswrapper[4783]: E1002 10:56:31.611730 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-cs782" podUID="4e629356-d5af-454b-8451-c651ccf13b32" Oct 02 10:56:33 crc 
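[Editor's note: the ErrImagePull followed a few seconds later by ImagePullBackOff above is the kubelet damping its retries: each failed pull pushes the next attempt further out, up to a cap. A minimal sketch of that pattern under stated assumptions: the 10s base and 5m cap reflect the kubelet's documented image-pull back-off defaults, and pullImage is a stand-in for the failing CRI call, not the real client.]

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// pullImage stands in for the CRI ImageService PullImage call that keeps
// getting cancelled in the records above; it is not the kubelet's real client.
func pullImage(ref string) error {
	return errors.New("rpc error: code = Canceled")
}

func main() {
	const maxDelay = 5 * time.Minute // assumed cap, per the kubelet's documented defaults
	delay := 10 * time.Second        // assumed initial back-off
	for attempt := 1; attempt <= 5; attempt++ {
		err := pullImage("registry.redhat.io/redhat/redhat-operator-index:v4.18")
		if err == nil {
			return
		}
		fmt.Printf("attempt %d: %v; backing off %s before retrying\n", attempt, err, delay)
		time.Sleep(delay)
		delay *= 2 // double the wait after every failure, like Back-off pulling image
		if delay > maxDelay {
			delay = maxDelay
		}
	}
}
```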
Oct 02 10:56:33 crc kubenswrapper[4783]: E1002 10:56:33.026098 4783 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18"
Oct 02 10:56:33 crc kubenswrapper[4783]: E1002 10:56:33.026716 4783 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-bzslp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-zxxz4_openshift-marketplace(31f92601-c54c-4f96-88db-35fe1673fe40): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Oct 02 10:56:33 crc kubenswrapper[4783]: E1002 10:56:33.027952 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-zxxz4" podUID="31f92601-c54c-4f96-88db-35fe1673fe40"
Oct 02 10:56:33 crc kubenswrapper[4783]: E1002 10:56:33.141400 4783 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18"
Oct 02 10:56:33 crc kubenswrapper[4783]: E1002 10:56:33.141579 4783 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kpzxw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-97zw7_openshift-marketplace(b241b260-2dbd-4383-8a28-b8728e86a605): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Oct 02 10:56:33 crc kubenswrapper[4783]: E1002 10:56:33.142852 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-97zw7" podUID="b241b260-2dbd-4383-8a28-b8728e86a605"
Oct 02 10:56:36 crc kubenswrapper[4783]: E1002 10:56:36.173505 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-zxxz4" podUID="31f92601-c54c-4f96-88db-35fe1673fe40"
Oct 02 10:56:36 crc kubenswrapper[4783]: E1002 10:56:36.175668 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-97zw7" podUID="b241b260-2dbd-4383-8a28-b8728e86a605"
Oct 02 10:56:36 crc kubenswrapper[4783]: E1002 10:56:36.261776 4783 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18"
Oct 02 10:56:36 crc kubenswrapper[4783]: E1002 10:56:36.262005 4783 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-cjshf,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-ncpxv_openshift-marketplace(797d3b59-9112-4efe-bd7d-34718545bc1c): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Oct 02 10:56:36 crc kubenswrapper[4783]: E1002 10:56:36.263133 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-ncpxv" podUID="797d3b59-9112-4efe-bd7d-34718545bc1c"
Oct 02 10:56:38 crc kubenswrapper[4783]: E1002 10:56:38.650702 4783 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18"
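[Editor's note: each dumped container spec above carries the same SecurityContext. Rewritten as client-go structs purely for readability, with the same values the dumps show: drop ALL capabilities, fixed non-root UID 1000170000, no privilege escalation. The ptr helper is local, not a Kubernetes API.]

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// ptr is a small local helper for the pointer-valued fields.
func ptr[T any](v T) *T { return &v }

func main() {
	sc := &corev1.SecurityContext{
		Capabilities:             &corev1.Capabilities{Drop: []corev1.Capability{"ALL"}},
		RunAsUser:                ptr(int64(1000170000)),
		RunAsNonRoot:             ptr(true),
		AllowPrivilegeEscalation: ptr(false),
	}
	fmt.Printf("%+v\n", sc)
}
```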
Oct 02 10:56:38 crc kubenswrapper[4783]: E1002 10:56:38.651561 4783 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-d4gmk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-4ntdb_openshift-marketplace(c97a1686-3bf0-4739-a538-a191023eff38): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Oct 02 10:56:38 crc kubenswrapper[4783]: E1002 10:56:38.652771 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-4ntdb" podUID="c97a1686-3bf0-4739-a538-a191023eff38"
Oct 02 10:56:38 crc kubenswrapper[4783]: E1002 10:56:38.741574 4783 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18"
Oct 02 10:56:38 crc kubenswrapper[4783]: E1002 10:56:38.741800 4783 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-wzvc2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-h7sxv_openshift-marketplace(df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Oct 02 10:56:38 crc kubenswrapper[4783]: E1002 10:56:38.742903 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-h7sxv" podUID="df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850"
Oct 02 10:56:38 crc kubenswrapper[4783]: I1002 10:56:38.776607 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-6qbg4"]
Oct 02 10:56:38 crc kubenswrapper[4783]: I1002 10:56:38.993029 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-6qbg4" event={"ID":"f05f5bf0-b0a7-453b-999b-8ef23ca6cc68","Type":"ContainerStarted","Data":"60469ff92b729c2acfa4ed2478b5434f522922977111a3c0cd1046d107be66b5"}
Oct 02 10:56:38 crc kubenswrapper[4783]: I1002 10:56:38.996029 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-r6pqv" event={"ID":"71af7827-a6d5-4a87-9839-996ab528213d","Type":"ContainerStarted","Data":"2b6fb1ca0bd29344e40274651743b6be2a4d1d038b89df99a4471d66698e5cb7"}
Oct 02 10:56:38 crc kubenswrapper[4783]: I1002 10:56:38.996437 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-r6pqv"
Oct 02 10:56:38 crc kubenswrapper[4783]: I1002 10:56:38.996729 4783 patch_prober.go:28] interesting pod/downloads-7954f5f757-r6pqv container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body=
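[Editor's note: every failing init container above runs /utilities/copy-content with the same --catalog.from/--catalog.to and --cache.from/--cache.to arguments. Below is a rough, assumption-laden sketch of that job as a recursive tree copy; it mirrors only the flags visible in the spec dumps and says nothing about what the real binary does beyond that.]

```go
package main

import (
	"flag"
	"io"
	"log"
	"os"
	"path/filepath"
)

// copyTree recursively copies every file and directory under from into to.
func copyTree(from, to string) error {
	return filepath.WalkDir(from, func(path string, d os.DirEntry, err error) error {
		if err != nil {
			return err
		}
		rel, err := filepath.Rel(from, path)
		if err != nil {
			return err
		}
		dst := filepath.Join(to, rel)
		if d.IsDir() {
			return os.MkdirAll(dst, 0o755)
		}
		src, err := os.Open(path)
		if err != nil {
			return err
		}
		defer src.Close()
		out, err := os.Create(dst)
		if err != nil {
			return err
		}
		defer out.Close()
		_, err = io.Copy(out, src)
		return err
	})
}

func main() {
	// Flag names and defaults copied from the Args in the spec dumps above.
	catalogFrom := flag.String("catalog.from", "/configs", "catalog source")
	catalogTo := flag.String("catalog.to", "/extracted-catalog/catalog", "catalog destination")
	cacheFrom := flag.String("cache.from", "/tmp/cache", "cache source")
	cacheTo := flag.String("cache.to", "/extracted-catalog/cache", "cache destination")
	flag.Parse()
	for _, pair := range [][2]string{{*catalogFrom, *catalogTo}, {*cacheFrom, *cacheTo}} {
		if err := copyTree(pair[0], pair[1]); err != nil {
			log.Fatal(err)
		}
	}
}
```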
probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Oct 02 10:56:39 crc kubenswrapper[4783]: I1002 10:56:39.000875 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" event={"ID":"3288cc82-59a8-408e-8b0e-b5255882b4fb","Type":"ContainerStarted","Data":"71a46c0d678687b26ff4e9327de076d3d20fdca296f5eb412b9a94a9f573bc25"} Oct 02 10:56:39 crc kubenswrapper[4783]: E1002 10:56:39.002730 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-h7sxv" podUID="df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850" Oct 02 10:56:39 crc kubenswrapper[4783]: E1002 10:56:39.006747 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-4ntdb" podUID="c97a1686-3bf0-4739-a538-a191023eff38" Oct 02 10:56:40 crc kubenswrapper[4783]: I1002 10:56:40.010650 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-6qbg4" event={"ID":"f05f5bf0-b0a7-453b-999b-8ef23ca6cc68","Type":"ContainerStarted","Data":"e63eb6e13d516bf8f037827e759a08de09c1b333ceb06aa973441c951789cd90"} Oct 02 10:56:40 crc kubenswrapper[4783]: I1002 10:56:40.011608 4783 patch_prober.go:28] interesting pod/downloads-7954f5f757-r6pqv container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Oct 02 10:56:40 crc kubenswrapper[4783]: I1002 10:56:40.011680 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-r6pqv" podUID="71af7827-a6d5-4a87-9839-996ab528213d" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Oct 02 10:56:40 crc kubenswrapper[4783]: I1002 10:56:40.287507 4783 patch_prober.go:28] interesting pod/downloads-7954f5f757-r6pqv container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Oct 02 10:56:40 crc kubenswrapper[4783]: I1002 10:56:40.287597 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-r6pqv" podUID="71af7827-a6d5-4a87-9839-996ab528213d" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Oct 02 10:56:40 crc kubenswrapper[4783]: I1002 10:56:40.288087 4783 patch_prober.go:28] interesting pod/downloads-7954f5f757-r6pqv container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Oct 02 10:56:40 crc kubenswrapper[4783]: I1002 10:56:40.288133 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-r6pqv" podUID="71af7827-a6d5-4a87-9839-996ab528213d" containerName="download-server" 
probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Oct 02 10:56:43 crc kubenswrapper[4783]: I1002 10:56:43.045321 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-6qbg4" event={"ID":"f05f5bf0-b0a7-453b-999b-8ef23ca6cc68","Type":"ContainerStarted","Data":"a5bad2f1ed20231d77028a4d95315bface4175c66fb6660deaa6017fc99468b8"} Oct 02 10:56:44 crc kubenswrapper[4783]: I1002 10:56:44.084047 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-6qbg4" podStartSLOduration=215.084012748 podStartE2EDuration="3m35.084012748s" podCreationTimestamp="2025-10-02 10:53:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:56:44.076459993 +0000 UTC m=+237.392654304" watchObservedRunningTime="2025-10-02 10:56:44.084012748 +0000 UTC m=+237.400207089" Oct 02 10:56:50 crc kubenswrapper[4783]: I1002 10:56:50.283125 4783 patch_prober.go:28] interesting pod/downloads-7954f5f757-r6pqv container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Oct 02 10:56:50 crc kubenswrapper[4783]: I1002 10:56:50.284612 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-r6pqv" podUID="71af7827-a6d5-4a87-9839-996ab528213d" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Oct 02 10:56:50 crc kubenswrapper[4783]: I1002 10:56:50.283124 4783 patch_prober.go:28] interesting pod/downloads-7954f5f757-r6pqv container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" start-of-body= Oct 02 10:56:50 crc kubenswrapper[4783]: I1002 10:56:50.284741 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-r6pqv" podUID="71af7827-a6d5-4a87-9839-996ab528213d" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.12:8080/\": dial tcp 10.217.0.12:8080: connect: connection refused" Oct 02 10:57:00 crc kubenswrapper[4783]: I1002 10:57:00.290346 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-r6pqv" Oct 02 10:57:17 crc kubenswrapper[4783]: I1002 10:57:17.281179 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h7sxv" event={"ID":"df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850","Type":"ContainerStarted","Data":"b5c242d4447980f08ae648f850022eb4568fd6d40bc7201f94abe3e36eeca55c"} Oct 02 10:57:17 crc kubenswrapper[4783]: I1002 10:57:17.287272 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cs782" event={"ID":"4e629356-d5af-454b-8451-c651ccf13b32","Type":"ContainerStarted","Data":"5971d961ae123f8871393d051e6ccc4ea725f4ad1ea4ad089348435c939de815"} Oct 02 10:57:17 crc kubenswrapper[4783]: I1002 10:57:17.290375 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ncpxv" 
event={"ID":"797d3b59-9112-4efe-bd7d-34718545bc1c","Type":"ContainerStarted","Data":"2c03a2c8c42fd2873a3348d646690a6332b2332d1e2f6ee70434383896826822"} Oct 02 10:57:17 crc kubenswrapper[4783]: I1002 10:57:17.292498 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-97zw7" event={"ID":"b241b260-2dbd-4383-8a28-b8728e86a605","Type":"ContainerStarted","Data":"e81e4672a11f65e852fd4c30ac3737e1cf41460c8e1a25002908dea8bffcfaa8"} Oct 02 10:57:17 crc kubenswrapper[4783]: I1002 10:57:17.299335 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5gvbx" event={"ID":"9a31888b-bb76-4ec4-91fe-745018b2e966","Type":"ContainerStarted","Data":"c15a35e9ea4b9668914407c0770e32ffe2c0d2a2ea8e73774ab20d47860e6bb0"} Oct 02 10:57:17 crc kubenswrapper[4783]: I1002 10:57:17.302524 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cbwgv" event={"ID":"37f50271-72da-49ab-972e-ef5cb30020cc","Type":"ContainerStarted","Data":"19c8f24a31fd3f8078c58cdcbb6ca83bab622fc42e3be7ea53104abc003a4ddd"} Oct 02 10:57:17 crc kubenswrapper[4783]: I1002 10:57:17.305498 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zxxz4" event={"ID":"31f92601-c54c-4f96-88db-35fe1673fe40","Type":"ContainerStarted","Data":"ff9cfba3cd5abe58585db567652d4049b9c35749e0ab86eaed273f3d722d2db7"} Oct 02 10:57:17 crc kubenswrapper[4783]: I1002 10:57:17.307686 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4ntdb" event={"ID":"c97a1686-3bf0-4739-a538-a191023eff38","Type":"ContainerStarted","Data":"e7f746ca9a5130b68ecd379e7d9453c99f2c2b18210caa915e0e777629b5adb0"} Oct 02 10:57:18 crc kubenswrapper[4783]: I1002 10:57:18.315174 4783 generic.go:334] "Generic (PLEG): container finished" podID="797d3b59-9112-4efe-bd7d-34718545bc1c" containerID="2c03a2c8c42fd2873a3348d646690a6332b2332d1e2f6ee70434383896826822" exitCode=0 Oct 02 10:57:18 crc kubenswrapper[4783]: I1002 10:57:18.315248 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ncpxv" event={"ID":"797d3b59-9112-4efe-bd7d-34718545bc1c","Type":"ContainerDied","Data":"2c03a2c8c42fd2873a3348d646690a6332b2332d1e2f6ee70434383896826822"} Oct 02 10:57:18 crc kubenswrapper[4783]: I1002 10:57:18.317310 4783 generic.go:334] "Generic (PLEG): container finished" podID="b241b260-2dbd-4383-8a28-b8728e86a605" containerID="e81e4672a11f65e852fd4c30ac3737e1cf41460c8e1a25002908dea8bffcfaa8" exitCode=0 Oct 02 10:57:18 crc kubenswrapper[4783]: I1002 10:57:18.317367 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-97zw7" event={"ID":"b241b260-2dbd-4383-8a28-b8728e86a605","Type":"ContainerDied","Data":"e81e4672a11f65e852fd4c30ac3737e1cf41460c8e1a25002908dea8bffcfaa8"} Oct 02 10:57:18 crc kubenswrapper[4783]: I1002 10:57:18.323002 4783 generic.go:334] "Generic (PLEG): container finished" podID="9a31888b-bb76-4ec4-91fe-745018b2e966" containerID="c15a35e9ea4b9668914407c0770e32ffe2c0d2a2ea8e73774ab20d47860e6bb0" exitCode=0 Oct 02 10:57:18 crc kubenswrapper[4783]: I1002 10:57:18.323133 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5gvbx" event={"ID":"9a31888b-bb76-4ec4-91fe-745018b2e966","Type":"ContainerDied","Data":"c15a35e9ea4b9668914407c0770e32ffe2c0d2a2ea8e73774ab20d47860e6bb0"} Oct 02 10:57:18 crc kubenswrapper[4783]: I1002 
10:57:18.325559 4783 generic.go:334] "Generic (PLEG): container finished" podID="37f50271-72da-49ab-972e-ef5cb30020cc" containerID="19c8f24a31fd3f8078c58cdcbb6ca83bab622fc42e3be7ea53104abc003a4ddd" exitCode=0 Oct 02 10:57:18 crc kubenswrapper[4783]: I1002 10:57:18.325622 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cbwgv" event={"ID":"37f50271-72da-49ab-972e-ef5cb30020cc","Type":"ContainerDied","Data":"19c8f24a31fd3f8078c58cdcbb6ca83bab622fc42e3be7ea53104abc003a4ddd"} Oct 02 10:57:18 crc kubenswrapper[4783]: I1002 10:57:18.332935 4783 generic.go:334] "Generic (PLEG): container finished" podID="31f92601-c54c-4f96-88db-35fe1673fe40" containerID="ff9cfba3cd5abe58585db567652d4049b9c35749e0ab86eaed273f3d722d2db7" exitCode=0 Oct 02 10:57:18 crc kubenswrapper[4783]: I1002 10:57:18.333054 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zxxz4" event={"ID":"31f92601-c54c-4f96-88db-35fe1673fe40","Type":"ContainerDied","Data":"ff9cfba3cd5abe58585db567652d4049b9c35749e0ab86eaed273f3d722d2db7"} Oct 02 10:57:18 crc kubenswrapper[4783]: I1002 10:57:18.342785 4783 generic.go:334] "Generic (PLEG): container finished" podID="c97a1686-3bf0-4739-a538-a191023eff38" containerID="e7f746ca9a5130b68ecd379e7d9453c99f2c2b18210caa915e0e777629b5adb0" exitCode=0 Oct 02 10:57:18 crc kubenswrapper[4783]: I1002 10:57:18.342872 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4ntdb" event={"ID":"c97a1686-3bf0-4739-a538-a191023eff38","Type":"ContainerDied","Data":"e7f746ca9a5130b68ecd379e7d9453c99f2c2b18210caa915e0e777629b5adb0"} Oct 02 10:57:18 crc kubenswrapper[4783]: I1002 10:57:18.347192 4783 generic.go:334] "Generic (PLEG): container finished" podID="df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850" containerID="b5c242d4447980f08ae648f850022eb4568fd6d40bc7201f94abe3e36eeca55c" exitCode=0 Oct 02 10:57:18 crc kubenswrapper[4783]: I1002 10:57:18.347240 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h7sxv" event={"ID":"df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850","Type":"ContainerDied","Data":"b5c242d4447980f08ae648f850022eb4568fd6d40bc7201f94abe3e36eeca55c"} Oct 02 10:57:18 crc kubenswrapper[4783]: I1002 10:57:18.358707 4783 generic.go:334] "Generic (PLEG): container finished" podID="4e629356-d5af-454b-8451-c651ccf13b32" containerID="5971d961ae123f8871393d051e6ccc4ea725f4ad1ea4ad089348435c939de815" exitCode=0 Oct 02 10:57:18 crc kubenswrapper[4783]: I1002 10:57:18.358739 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cs782" event={"ID":"4e629356-d5af-454b-8451-c651ccf13b32","Type":"ContainerDied","Data":"5971d961ae123f8871393d051e6ccc4ea725f4ad1ea4ad089348435c939de815"} Oct 02 10:57:19 crc kubenswrapper[4783]: I1002 10:57:19.366862 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zxxz4" event={"ID":"31f92601-c54c-4f96-88db-35fe1673fe40","Type":"ContainerStarted","Data":"3f8a841590f268f2e4363ca48264a21154a560f8a116f632d4eca4be552ee238"} Oct 02 10:57:19 crc kubenswrapper[4783]: I1002 10:57:19.370003 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4ntdb" event={"ID":"c97a1686-3bf0-4739-a538-a191023eff38","Type":"ContainerStarted","Data":"443b573273502e5bfa87ad5e27b4c1f63ba9a3735feb3c314c3b02f61b48c97a"} Oct 02 10:57:19 crc kubenswrapper[4783]: I1002 10:57:19.373764 4783 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h7sxv" event={"ID":"df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850","Type":"ContainerStarted","Data":"c660217facfe9b4e4eafb8de09cf5194170daf7c5bbfa3c7678d344ee508d210"} Oct 02 10:57:19 crc kubenswrapper[4783]: I1002 10:57:19.379067 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cs782" event={"ID":"4e629356-d5af-454b-8451-c651ccf13b32","Type":"ContainerStarted","Data":"6f1c72dacf9221a8287d5e2b0392fa09c2eba3b75b0ce36872576b7f0ace58c0"} Oct 02 10:57:19 crc kubenswrapper[4783]: I1002 10:57:19.383720 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ncpxv" event={"ID":"797d3b59-9112-4efe-bd7d-34718545bc1c","Type":"ContainerStarted","Data":"65bd0296c4e52c078167aabf4e8a94dcdcb21c4e64af4ca102f7f8186adc69e8"} Oct 02 10:57:19 crc kubenswrapper[4783]: I1002 10:57:19.384850 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-zxxz4" podStartSLOduration=4.054922894 podStartE2EDuration="2m0.384835573s" podCreationTimestamp="2025-10-02 10:55:19 +0000 UTC" firstStartedPulling="2025-10-02 10:55:22.439397333 +0000 UTC m=+155.755591584" lastFinishedPulling="2025-10-02 10:57:18.769310002 +0000 UTC m=+272.085504263" observedRunningTime="2025-10-02 10:57:19.383317453 +0000 UTC m=+272.699511714" watchObservedRunningTime="2025-10-02 10:57:19.384835573 +0000 UTC m=+272.701029834" Oct 02 10:57:19 crc kubenswrapper[4783]: I1002 10:57:19.385947 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-97zw7" event={"ID":"b241b260-2dbd-4383-8a28-b8728e86a605","Type":"ContainerStarted","Data":"8cdd7352440819f6a136d35e57e776fd6a46304a63c5a3608159ac0d05e6b302"} Oct 02 10:57:19 crc kubenswrapper[4783]: I1002 10:57:19.387922 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5gvbx" event={"ID":"9a31888b-bb76-4ec4-91fe-745018b2e966","Type":"ContainerStarted","Data":"cd31cb0e0eefc49706d1aeaeb5cf241a4c0a6f96a9f416f1a7936821a8082fd9"} Oct 02 10:57:19 crc kubenswrapper[4783]: I1002 10:57:19.391671 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cbwgv" event={"ID":"37f50271-72da-49ab-972e-ef5cb30020cc","Type":"ContainerStarted","Data":"adc8ff68336a3f50241241436647f9e6ff048dce5932e64c92268e143e9b475f"} Oct 02 10:57:19 crc kubenswrapper[4783]: I1002 10:57:19.421312 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-h7sxv" podStartSLOduration=2.7651442939999997 podStartE2EDuration="1m58.421293192s" podCreationTimestamp="2025-10-02 10:55:21 +0000 UTC" firstStartedPulling="2025-10-02 10:55:23.523614061 +0000 UTC m=+156.839808322" lastFinishedPulling="2025-10-02 10:57:19.179762959 +0000 UTC m=+272.495957220" observedRunningTime="2025-10-02 10:57:19.418260434 +0000 UTC m=+272.734454695" watchObservedRunningTime="2025-10-02 10:57:19.421293192 +0000 UTC m=+272.737487453" Oct 02 10:57:19 crc kubenswrapper[4783]: I1002 10:57:19.436159 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-cs782" podStartSLOduration=4.874041695 podStartE2EDuration="2m0.436146132s" podCreationTimestamp="2025-10-02 10:55:19 +0000 UTC" firstStartedPulling="2025-10-02 10:55:23.477649093 +0000 UTC m=+156.793843354" 
lastFinishedPulling="2025-10-02 10:57:19.03975353 +0000 UTC m=+272.355947791" observedRunningTime="2025-10-02 10:57:19.435358157 +0000 UTC m=+272.751552418" watchObservedRunningTime="2025-10-02 10:57:19.436146132 +0000 UTC m=+272.752340393" Oct 02 10:57:19 crc kubenswrapper[4783]: I1002 10:57:19.486965 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-4ntdb" podStartSLOduration=3.216147444 podStartE2EDuration="1m58.486944646s" podCreationTimestamp="2025-10-02 10:55:21 +0000 UTC" firstStartedPulling="2025-10-02 10:55:23.523592661 +0000 UTC m=+156.839786922" lastFinishedPulling="2025-10-02 10:57:18.794389863 +0000 UTC m=+272.110584124" observedRunningTime="2025-10-02 10:57:19.456494261 +0000 UTC m=+272.772688522" watchObservedRunningTime="2025-10-02 10:57:19.486944646 +0000 UTC m=+272.803138907" Oct 02 10:57:19 crc kubenswrapper[4783]: I1002 10:57:19.513234 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-5gvbx" podStartSLOduration=4.080686546 podStartE2EDuration="1m57.513213215s" podCreationTimestamp="2025-10-02 10:55:22 +0000 UTC" firstStartedPulling="2025-10-02 10:55:25.532277216 +0000 UTC m=+158.848471477" lastFinishedPulling="2025-10-02 10:57:18.964803885 +0000 UTC m=+272.280998146" observedRunningTime="2025-10-02 10:57:19.487324388 +0000 UTC m=+272.803518649" watchObservedRunningTime="2025-10-02 10:57:19.513213215 +0000 UTC m=+272.829407476" Oct 02 10:57:19 crc kubenswrapper[4783]: I1002 10:57:19.514433 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-97zw7" podStartSLOduration=4.106958859 podStartE2EDuration="2m0.514425854s" podCreationTimestamp="2025-10-02 10:55:19 +0000 UTC" firstStartedPulling="2025-10-02 10:55:22.462131321 +0000 UTC m=+155.778325582" lastFinishedPulling="2025-10-02 10:57:18.869598306 +0000 UTC m=+272.185792577" observedRunningTime="2025-10-02 10:57:19.511240641 +0000 UTC m=+272.827434912" watchObservedRunningTime="2025-10-02 10:57:19.514425854 +0000 UTC m=+272.830620115" Oct 02 10:57:19 crc kubenswrapper[4783]: I1002 10:57:19.535512 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-ncpxv" podStartSLOduration=4.197628496 podStartE2EDuration="1m57.535495966s" podCreationTimestamp="2025-10-02 10:55:22 +0000 UTC" firstStartedPulling="2025-10-02 10:55:25.547663921 +0000 UTC m=+158.863858182" lastFinishedPulling="2025-10-02 10:57:18.885531391 +0000 UTC m=+272.201725652" observedRunningTime="2025-10-02 10:57:19.534895727 +0000 UTC m=+272.851089988" watchObservedRunningTime="2025-10-02 10:57:19.535495966 +0000 UTC m=+272.851690227" Oct 02 10:57:19 crc kubenswrapper[4783]: I1002 10:57:19.562541 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-cbwgv" podStartSLOduration=5.3183049780000005 podStartE2EDuration="2m0.56252433s" podCreationTimestamp="2025-10-02 10:55:19 +0000 UTC" firstStartedPulling="2025-10-02 10:55:23.502120586 +0000 UTC m=+156.818314847" lastFinishedPulling="2025-10-02 10:57:18.746339938 +0000 UTC m=+272.062534199" observedRunningTime="2025-10-02 10:57:19.559943487 +0000 UTC m=+272.876137768" watchObservedRunningTime="2025-10-02 10:57:19.56252433 +0000 UTC m=+272.878718591" Oct 02 10:57:19 crc kubenswrapper[4783]: I1002 10:57:19.645585 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-marketplace/certified-operators-97zw7" Oct 02 10:57:19 crc kubenswrapper[4783]: I1002 10:57:19.645742 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-97zw7" Oct 02 10:57:19 crc kubenswrapper[4783]: I1002 10:57:19.785909 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-zxxz4" Oct 02 10:57:19 crc kubenswrapper[4783]: I1002 10:57:19.786257 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-zxxz4" Oct 02 10:57:20 crc kubenswrapper[4783]: I1002 10:57:20.845654 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-cbwgv" Oct 02 10:57:20 crc kubenswrapper[4783]: I1002 10:57:20.845968 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-cs782" Oct 02 10:57:20 crc kubenswrapper[4783]: I1002 10:57:20.845980 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-cs782" Oct 02 10:57:20 crc kubenswrapper[4783]: I1002 10:57:20.845988 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-cbwgv" Oct 02 10:57:20 crc kubenswrapper[4783]: I1002 10:57:20.891382 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-97zw7" podUID="b241b260-2dbd-4383-8a28-b8728e86a605" containerName="registry-server" probeResult="failure" output=< Oct 02 10:57:20 crc kubenswrapper[4783]: timeout: failed to connect service ":50051" within 1s Oct 02 10:57:20 crc kubenswrapper[4783]: > Oct 02 10:57:20 crc kubenswrapper[4783]: I1002 10:57:20.898778 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-zxxz4" podUID="31f92601-c54c-4f96-88db-35fe1673fe40" containerName="registry-server" probeResult="failure" output=< Oct 02 10:57:20 crc kubenswrapper[4783]: timeout: failed to connect service ":50051" within 1s Oct 02 10:57:20 crc kubenswrapper[4783]: > Oct 02 10:57:21 crc kubenswrapper[4783]: I1002 10:57:21.568850 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-h7sxv" Oct 02 10:57:21 crc kubenswrapper[4783]: I1002 10:57:21.569620 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-h7sxv" Oct 02 10:57:21 crc kubenswrapper[4783]: I1002 10:57:21.610650 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-h7sxv" Oct 02 10:57:21 crc kubenswrapper[4783]: I1002 10:57:21.883341 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-cbwgv" podUID="37f50271-72da-49ab-972e-ef5cb30020cc" containerName="registry-server" probeResult="failure" output=< Oct 02 10:57:21 crc kubenswrapper[4783]: timeout: failed to connect service ":50051" within 1s Oct 02 10:57:21 crc kubenswrapper[4783]: > Oct 02 10:57:21 crc kubenswrapper[4783]: I1002 10:57:21.901540 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-cs782" podUID="4e629356-d5af-454b-8451-c651ccf13b32" containerName="registry-server" probeResult="failure" output=< Oct 02 10:57:21 crc kubenswrapper[4783]: timeout: failed to 
connect service ":50051" within 1s Oct 02 10:57:21 crc kubenswrapper[4783]: > Oct 02 10:57:21 crc kubenswrapper[4783]: I1002 10:57:21.943353 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-4ntdb" Oct 02 10:57:21 crc kubenswrapper[4783]: I1002 10:57:21.943471 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-4ntdb" Oct 02 10:57:21 crc kubenswrapper[4783]: I1002 10:57:21.983174 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-4ntdb" Oct 02 10:57:22 crc kubenswrapper[4783]: I1002 10:57:22.907210 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-ncpxv" Oct 02 10:57:22 crc kubenswrapper[4783]: I1002 10:57:22.907249 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-ncpxv" Oct 02 10:57:22 crc kubenswrapper[4783]: I1002 10:57:22.962923 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-5gvbx" Oct 02 10:57:22 crc kubenswrapper[4783]: I1002 10:57:22.962969 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-5gvbx" Oct 02 10:57:23 crc kubenswrapper[4783]: I1002 10:57:23.449302 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-4ntdb" Oct 02 10:57:23 crc kubenswrapper[4783]: I1002 10:57:23.942349 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-ncpxv" podUID="797d3b59-9112-4efe-bd7d-34718545bc1c" containerName="registry-server" probeResult="failure" output=< Oct 02 10:57:23 crc kubenswrapper[4783]: timeout: failed to connect service ":50051" within 1s Oct 02 10:57:23 crc kubenswrapper[4783]: > Oct 02 10:57:24 crc kubenswrapper[4783]: I1002 10:57:24.001539 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-5gvbx" podUID="9a31888b-bb76-4ec4-91fe-745018b2e966" containerName="registry-server" probeResult="failure" output=< Oct 02 10:57:24 crc kubenswrapper[4783]: timeout: failed to connect service ":50051" within 1s Oct 02 10:57:24 crc kubenswrapper[4783]: > Oct 02 10:57:26 crc kubenswrapper[4783]: I1002 10:57:25.928445 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4ntdb"] Oct 02 10:57:26 crc kubenswrapper[4783]: I1002 10:57:25.928687 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-4ntdb" podUID="c97a1686-3bf0-4739-a538-a191023eff38" containerName="registry-server" containerID="cri-o://443b573273502e5bfa87ad5e27b4c1f63ba9a3735feb3c314c3b02f61b48c97a" gracePeriod=2 Oct 02 10:57:27 crc kubenswrapper[4783]: I1002 10:57:27.430889 4783 generic.go:334] "Generic (PLEG): container finished" podID="c97a1686-3bf0-4739-a538-a191023eff38" containerID="443b573273502e5bfa87ad5e27b4c1f63ba9a3735feb3c314c3b02f61b48c97a" exitCode=0 Oct 02 10:57:27 crc kubenswrapper[4783]: I1002 10:57:27.430964 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4ntdb" event={"ID":"c97a1686-3bf0-4739-a538-a191023eff38","Type":"ContainerDied","Data":"443b573273502e5bfa87ad5e27b4c1f63ba9a3735feb3c314c3b02f61b48c97a"} Oct 02 10:57:28 crc 
kubenswrapper[4783]: I1002 10:57:28.132278 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4ntdb" Oct 02 10:57:28 crc kubenswrapper[4783]: I1002 10:57:28.259206 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c97a1686-3bf0-4739-a538-a191023eff38-utilities\") pod \"c97a1686-3bf0-4739-a538-a191023eff38\" (UID: \"c97a1686-3bf0-4739-a538-a191023eff38\") " Oct 02 10:57:28 crc kubenswrapper[4783]: I1002 10:57:28.259276 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4gmk\" (UniqueName: \"kubernetes.io/projected/c97a1686-3bf0-4739-a538-a191023eff38-kube-api-access-d4gmk\") pod \"c97a1686-3bf0-4739-a538-a191023eff38\" (UID: \"c97a1686-3bf0-4739-a538-a191023eff38\") " Oct 02 10:57:28 crc kubenswrapper[4783]: I1002 10:57:28.259309 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c97a1686-3bf0-4739-a538-a191023eff38-catalog-content\") pod \"c97a1686-3bf0-4739-a538-a191023eff38\" (UID: \"c97a1686-3bf0-4739-a538-a191023eff38\") " Oct 02 10:57:28 crc kubenswrapper[4783]: I1002 10:57:28.260240 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c97a1686-3bf0-4739-a538-a191023eff38-utilities" (OuterVolumeSpecName: "utilities") pod "c97a1686-3bf0-4739-a538-a191023eff38" (UID: "c97a1686-3bf0-4739-a538-a191023eff38"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 10:57:28 crc kubenswrapper[4783]: I1002 10:57:28.265664 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c97a1686-3bf0-4739-a538-a191023eff38-kube-api-access-d4gmk" (OuterVolumeSpecName: "kube-api-access-d4gmk") pod "c97a1686-3bf0-4739-a538-a191023eff38" (UID: "c97a1686-3bf0-4739-a538-a191023eff38"). InnerVolumeSpecName "kube-api-access-d4gmk". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:57:28 crc kubenswrapper[4783]: I1002 10:57:28.276735 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c97a1686-3bf0-4739-a538-a191023eff38-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c97a1686-3bf0-4739-a538-a191023eff38" (UID: "c97a1686-3bf0-4739-a538-a191023eff38"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 10:57:28 crc kubenswrapper[4783]: I1002 10:57:28.361150 4783 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c97a1686-3bf0-4739-a538-a191023eff38-utilities\") on node \"crc\" DevicePath \"\"" Oct 02 10:57:28 crc kubenswrapper[4783]: I1002 10:57:28.361184 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4gmk\" (UniqueName: \"kubernetes.io/projected/c97a1686-3bf0-4739-a538-a191023eff38-kube-api-access-d4gmk\") on node \"crc\" DevicePath \"\"" Oct 02 10:57:28 crc kubenswrapper[4783]: I1002 10:57:28.361197 4783 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c97a1686-3bf0-4739-a538-a191023eff38-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 02 10:57:28 crc kubenswrapper[4783]: I1002 10:57:28.437479 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-4ntdb" event={"ID":"c97a1686-3bf0-4739-a538-a191023eff38","Type":"ContainerDied","Data":"591e1fb7381805018a78b794a4d8322b0e474091bb35ac29784ded1cf82d6f0b"} Oct 02 10:57:28 crc kubenswrapper[4783]: I1002 10:57:28.437802 4783 scope.go:117] "RemoveContainer" containerID="443b573273502e5bfa87ad5e27b4c1f63ba9a3735feb3c314c3b02f61b48c97a" Oct 02 10:57:28 crc kubenswrapper[4783]: I1002 10:57:28.437606 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-4ntdb" Oct 02 10:57:28 crc kubenswrapper[4783]: I1002 10:57:28.454460 4783 scope.go:117] "RemoveContainer" containerID="e7f746ca9a5130b68ecd379e7d9453c99f2c2b18210caa915e0e777629b5adb0" Oct 02 10:57:28 crc kubenswrapper[4783]: I1002 10:57:28.474818 4783 scope.go:117] "RemoveContainer" containerID="a8b472fcd0c864d83710d378367277c9c2f61326379a369993cdf88828fdd329" Oct 02 10:57:28 crc kubenswrapper[4783]: I1002 10:57:28.478352 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-4ntdb"] Oct 02 10:57:28 crc kubenswrapper[4783]: I1002 10:57:28.483581 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-4ntdb"] Oct 02 10:57:29 crc kubenswrapper[4783]: I1002 10:57:29.554986 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c97a1686-3bf0-4739-a538-a191023eff38" path="/var/lib/kubelet/pods/c97a1686-3bf0-4739-a538-a191023eff38/volumes" Oct 02 10:57:29 crc kubenswrapper[4783]: I1002 10:57:29.685445 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-97zw7" Oct 02 10:57:29 crc kubenswrapper[4783]: I1002 10:57:29.723143 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-97zw7" Oct 02 10:57:29 crc kubenswrapper[4783]: I1002 10:57:29.834090 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-zxxz4" Oct 02 10:57:29 crc kubenswrapper[4783]: I1002 10:57:29.885667 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-zxxz4" Oct 02 10:57:30 crc kubenswrapper[4783]: I1002 10:57:30.881731 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-cs782" Oct 02 10:57:30 crc kubenswrapper[4783]: I1002 10:57:30.889924 4783 kubelet.go:2542] 
"SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-cbwgv" Oct 02 10:57:30 crc kubenswrapper[4783]: I1002 10:57:30.929312 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-cs782" Oct 02 10:57:30 crc kubenswrapper[4783]: I1002 10:57:30.930342 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zxxz4"] Oct 02 10:57:30 crc kubenswrapper[4783]: I1002 10:57:30.943594 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-cbwgv" Oct 02 10:57:31 crc kubenswrapper[4783]: I1002 10:57:31.451996 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-zxxz4" podUID="31f92601-c54c-4f96-88db-35fe1673fe40" containerName="registry-server" containerID="cri-o://3f8a841590f268f2e4363ca48264a21154a560f8a116f632d4eca4be552ee238" gracePeriod=2 Oct 02 10:57:31 crc kubenswrapper[4783]: I1002 10:57:31.602991 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-h7sxv" Oct 02 10:57:32 crc kubenswrapper[4783]: I1002 10:57:32.461273 4783 generic.go:334] "Generic (PLEG): container finished" podID="31f92601-c54c-4f96-88db-35fe1673fe40" containerID="3f8a841590f268f2e4363ca48264a21154a560f8a116f632d4eca4be552ee238" exitCode=0 Oct 02 10:57:32 crc kubenswrapper[4783]: I1002 10:57:32.461659 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zxxz4" event={"ID":"31f92601-c54c-4f96-88db-35fe1673fe40","Type":"ContainerDied","Data":"3f8a841590f268f2e4363ca48264a21154a560f8a116f632d4eca4be552ee238"} Oct 02 10:57:32 crc kubenswrapper[4783]: I1002 10:57:32.573585 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zxxz4" Oct 02 10:57:32 crc kubenswrapper[4783]: I1002 10:57:32.612167 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/31f92601-c54c-4f96-88db-35fe1673fe40-utilities\") pod \"31f92601-c54c-4f96-88db-35fe1673fe40\" (UID: \"31f92601-c54c-4f96-88db-35fe1673fe40\") " Oct 02 10:57:32 crc kubenswrapper[4783]: I1002 10:57:32.612222 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/31f92601-c54c-4f96-88db-35fe1673fe40-catalog-content\") pod \"31f92601-c54c-4f96-88db-35fe1673fe40\" (UID: \"31f92601-c54c-4f96-88db-35fe1673fe40\") " Oct 02 10:57:32 crc kubenswrapper[4783]: I1002 10:57:32.612253 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bzslp\" (UniqueName: \"kubernetes.io/projected/31f92601-c54c-4f96-88db-35fe1673fe40-kube-api-access-bzslp\") pod \"31f92601-c54c-4f96-88db-35fe1673fe40\" (UID: \"31f92601-c54c-4f96-88db-35fe1673fe40\") " Oct 02 10:57:32 crc kubenswrapper[4783]: I1002 10:57:32.613345 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/31f92601-c54c-4f96-88db-35fe1673fe40-utilities" (OuterVolumeSpecName: "utilities") pod "31f92601-c54c-4f96-88db-35fe1673fe40" (UID: "31f92601-c54c-4f96-88db-35fe1673fe40"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 10:57:32 crc kubenswrapper[4783]: I1002 10:57:32.613714 4783 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/31f92601-c54c-4f96-88db-35fe1673fe40-utilities\") on node \"crc\" DevicePath \"\"" Oct 02 10:57:32 crc kubenswrapper[4783]: I1002 10:57:32.617861 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31f92601-c54c-4f96-88db-35fe1673fe40-kube-api-access-bzslp" (OuterVolumeSpecName: "kube-api-access-bzslp") pod "31f92601-c54c-4f96-88db-35fe1673fe40" (UID: "31f92601-c54c-4f96-88db-35fe1673fe40"). InnerVolumeSpecName "kube-api-access-bzslp". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:57:32 crc kubenswrapper[4783]: I1002 10:57:32.648662 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/31f92601-c54c-4f96-88db-35fe1673fe40-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "31f92601-c54c-4f96-88db-35fe1673fe40" (UID: "31f92601-c54c-4f96-88db-35fe1673fe40"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 10:57:32 crc kubenswrapper[4783]: I1002 10:57:32.714728 4783 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/31f92601-c54c-4f96-88db-35fe1673fe40-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 02 10:57:32 crc kubenswrapper[4783]: I1002 10:57:32.714762 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bzslp\" (UniqueName: \"kubernetes.io/projected/31f92601-c54c-4f96-88db-35fe1673fe40-kube-api-access-bzslp\") on node \"crc\" DevicePath \"\"" Oct 02 10:57:32 crc kubenswrapper[4783]: I1002 10:57:32.944543 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-ncpxv" Oct 02 10:57:32 crc kubenswrapper[4783]: I1002 10:57:32.999897 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-ncpxv" Oct 02 10:57:33 crc kubenswrapper[4783]: I1002 10:57:33.010134 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-5gvbx" Oct 02 10:57:33 crc kubenswrapper[4783]: I1002 10:57:33.054237 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-5gvbx" Oct 02 10:57:33 crc kubenswrapper[4783]: I1002 10:57:33.125598 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-cbwgv"] Oct 02 10:57:33 crc kubenswrapper[4783]: I1002 10:57:33.125889 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-cbwgv" podUID="37f50271-72da-49ab-972e-ef5cb30020cc" containerName="registry-server" containerID="cri-o://adc8ff68336a3f50241241436647f9e6ff048dce5932e64c92268e143e9b475f" gracePeriod=2 Oct 02 10:57:33 crc kubenswrapper[4783]: I1002 10:57:33.468488 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-zxxz4" event={"ID":"31f92601-c54c-4f96-88db-35fe1673fe40","Type":"ContainerDied","Data":"87f2e19a0146e325ee2e3327a00d0aa6448bc913dd149e4e3b3987ffcce503d5"} Oct 02 10:57:33 crc kubenswrapper[4783]: I1002 10:57:33.468825 4783 scope.go:117] "RemoveContainer" 
containerID="3f8a841590f268f2e4363ca48264a21154a560f8a116f632d4eca4be552ee238" Oct 02 10:57:33 crc kubenswrapper[4783]: I1002 10:57:33.468583 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-zxxz4" Oct 02 10:57:33 crc kubenswrapper[4783]: I1002 10:57:33.493852 4783 scope.go:117] "RemoveContainer" containerID="ff9cfba3cd5abe58585db567652d4049b9c35749e0ab86eaed273f3d722d2db7" Oct 02 10:57:33 crc kubenswrapper[4783]: I1002 10:57:33.494429 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-zxxz4"] Oct 02 10:57:33 crc kubenswrapper[4783]: I1002 10:57:33.497070 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-zxxz4"] Oct 02 10:57:33 crc kubenswrapper[4783]: I1002 10:57:33.508344 4783 scope.go:117] "RemoveContainer" containerID="f64f3cdb929ab7a818497f660dbeb543e8de41533b1fb5fddc66b9fcd2cd9c9b" Oct 02 10:57:33 crc kubenswrapper[4783]: I1002 10:57:33.550267 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31f92601-c54c-4f96-88db-35fe1673fe40" path="/var/lib/kubelet/pods/31f92601-c54c-4f96-88db-35fe1673fe40/volumes" Oct 02 10:57:34 crc kubenswrapper[4783]: I1002 10:57:34.475857 4783 generic.go:334] "Generic (PLEG): container finished" podID="37f50271-72da-49ab-972e-ef5cb30020cc" containerID="adc8ff68336a3f50241241436647f9e6ff048dce5932e64c92268e143e9b475f" exitCode=0 Oct 02 10:57:34 crc kubenswrapper[4783]: I1002 10:57:34.475916 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cbwgv" event={"ID":"37f50271-72da-49ab-972e-ef5cb30020cc","Type":"ContainerDied","Data":"adc8ff68336a3f50241241436647f9e6ff048dce5932e64c92268e143e9b475f"} Oct 02 10:57:34 crc kubenswrapper[4783]: I1002 10:57:34.964059 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-cbwgv" Oct 02 10:57:35 crc kubenswrapper[4783]: I1002 10:57:35.044845 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/37f50271-72da-49ab-972e-ef5cb30020cc-catalog-content\") pod \"37f50271-72da-49ab-972e-ef5cb30020cc\" (UID: \"37f50271-72da-49ab-972e-ef5cb30020cc\") " Oct 02 10:57:35 crc kubenswrapper[4783]: I1002 10:57:35.044884 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/37f50271-72da-49ab-972e-ef5cb30020cc-utilities\") pod \"37f50271-72da-49ab-972e-ef5cb30020cc\" (UID: \"37f50271-72da-49ab-972e-ef5cb30020cc\") " Oct 02 10:57:35 crc kubenswrapper[4783]: I1002 10:57:35.044914 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ljrl2\" (UniqueName: \"kubernetes.io/projected/37f50271-72da-49ab-972e-ef5cb30020cc-kube-api-access-ljrl2\") pod \"37f50271-72da-49ab-972e-ef5cb30020cc\" (UID: \"37f50271-72da-49ab-972e-ef5cb30020cc\") " Oct 02 10:57:35 crc kubenswrapper[4783]: I1002 10:57:35.045566 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/37f50271-72da-49ab-972e-ef5cb30020cc-utilities" (OuterVolumeSpecName: "utilities") pod "37f50271-72da-49ab-972e-ef5cb30020cc" (UID: "37f50271-72da-49ab-972e-ef5cb30020cc"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 10:57:35 crc kubenswrapper[4783]: I1002 10:57:35.048884 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/37f50271-72da-49ab-972e-ef5cb30020cc-kube-api-access-ljrl2" (OuterVolumeSpecName: "kube-api-access-ljrl2") pod "37f50271-72da-49ab-972e-ef5cb30020cc" (UID: "37f50271-72da-49ab-972e-ef5cb30020cc"). InnerVolumeSpecName "kube-api-access-ljrl2". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:57:35 crc kubenswrapper[4783]: I1002 10:57:35.084736 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/37f50271-72da-49ab-972e-ef5cb30020cc-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "37f50271-72da-49ab-972e-ef5cb30020cc" (UID: "37f50271-72da-49ab-972e-ef5cb30020cc"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 10:57:35 crc kubenswrapper[4783]: I1002 10:57:35.146384 4783 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/37f50271-72da-49ab-972e-ef5cb30020cc-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 02 10:57:35 crc kubenswrapper[4783]: I1002 10:57:35.146428 4783 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/37f50271-72da-49ab-972e-ef5cb30020cc-utilities\") on node \"crc\" DevicePath \"\"" Oct 02 10:57:35 crc kubenswrapper[4783]: I1002 10:57:35.146438 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ljrl2\" (UniqueName: \"kubernetes.io/projected/37f50271-72da-49ab-972e-ef5cb30020cc-kube-api-access-ljrl2\") on node \"crc\" DevicePath \"\"" Oct 02 10:57:35 crc kubenswrapper[4783]: I1002 10:57:35.483923 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cbwgv" event={"ID":"37f50271-72da-49ab-972e-ef5cb30020cc","Type":"ContainerDied","Data":"bc8fb8ba0a93baddba4dcfd867124464efca82b6b2b32dddca5e7d50ff0b1553"} Oct 02 10:57:35 crc kubenswrapper[4783]: I1002 10:57:35.483975 4783 scope.go:117] "RemoveContainer" containerID="adc8ff68336a3f50241241436647f9e6ff048dce5932e64c92268e143e9b475f" Oct 02 10:57:35 crc kubenswrapper[4783]: I1002 10:57:35.483981 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-cbwgv" Oct 02 10:57:35 crc kubenswrapper[4783]: I1002 10:57:35.499080 4783 scope.go:117] "RemoveContainer" containerID="19c8f24a31fd3f8078c58cdcbb6ca83bab622fc42e3be7ea53104abc003a4ddd" Oct 02 10:57:35 crc kubenswrapper[4783]: I1002 10:57:35.513360 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-cbwgv"] Oct 02 10:57:35 crc kubenswrapper[4783]: I1002 10:57:35.516334 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-cbwgv"] Oct 02 10:57:35 crc kubenswrapper[4783]: I1002 10:57:35.525448 4783 scope.go:117] "RemoveContainer" containerID="18af1ce8df4a003cbf805491f175be3c4dea266e19c885583af6cd85ccfe4bc0" Oct 02 10:57:35 crc kubenswrapper[4783]: I1002 10:57:35.550317 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="37f50271-72da-49ab-972e-ef5cb30020cc" path="/var/lib/kubelet/pods/37f50271-72da-49ab-972e-ef5cb30020cc/volumes" Oct 02 10:57:37 crc kubenswrapper[4783]: I1002 10:57:37.326802 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5gvbx"] Oct 02 10:57:37 crc kubenswrapper[4783]: I1002 10:57:37.327609 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-5gvbx" podUID="9a31888b-bb76-4ec4-91fe-745018b2e966" containerName="registry-server" containerID="cri-o://cd31cb0e0eefc49706d1aeaeb5cf241a4c0a6f96a9f416f1a7936821a8082fd9" gracePeriod=2 Oct 02 10:57:37 crc kubenswrapper[4783]: I1002 10:57:37.500581 4783 generic.go:334] "Generic (PLEG): container finished" podID="9a31888b-bb76-4ec4-91fe-745018b2e966" containerID="cd31cb0e0eefc49706d1aeaeb5cf241a4c0a6f96a9f416f1a7936821a8082fd9" exitCode=0 Oct 02 10:57:37 crc kubenswrapper[4783]: I1002 10:57:37.500619 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5gvbx" event={"ID":"9a31888b-bb76-4ec4-91fe-745018b2e966","Type":"ContainerDied","Data":"cd31cb0e0eefc49706d1aeaeb5cf241a4c0a6f96a9f416f1a7936821a8082fd9"} Oct 02 10:57:37 crc kubenswrapper[4783]: I1002 10:57:37.654612 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-5gvbx" Oct 02 10:57:37 crc kubenswrapper[4783]: I1002 10:57:37.673625 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a31888b-bb76-4ec4-91fe-745018b2e966-catalog-content\") pod \"9a31888b-bb76-4ec4-91fe-745018b2e966\" (UID: \"9a31888b-bb76-4ec4-91fe-745018b2e966\") " Oct 02 10:57:37 crc kubenswrapper[4783]: I1002 10:57:37.673789 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-psw82\" (UniqueName: \"kubernetes.io/projected/9a31888b-bb76-4ec4-91fe-745018b2e966-kube-api-access-psw82\") pod \"9a31888b-bb76-4ec4-91fe-745018b2e966\" (UID: \"9a31888b-bb76-4ec4-91fe-745018b2e966\") " Oct 02 10:57:37 crc kubenswrapper[4783]: I1002 10:57:37.673812 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a31888b-bb76-4ec4-91fe-745018b2e966-utilities\") pod \"9a31888b-bb76-4ec4-91fe-745018b2e966\" (UID: \"9a31888b-bb76-4ec4-91fe-745018b2e966\") " Oct 02 10:57:37 crc kubenswrapper[4783]: I1002 10:57:37.674867 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9a31888b-bb76-4ec4-91fe-745018b2e966-utilities" (OuterVolumeSpecName: "utilities") pod "9a31888b-bb76-4ec4-91fe-745018b2e966" (UID: "9a31888b-bb76-4ec4-91fe-745018b2e966"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 10:57:37 crc kubenswrapper[4783]: I1002 10:57:37.678387 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9a31888b-bb76-4ec4-91fe-745018b2e966-kube-api-access-psw82" (OuterVolumeSpecName: "kube-api-access-psw82") pod "9a31888b-bb76-4ec4-91fe-745018b2e966" (UID: "9a31888b-bb76-4ec4-91fe-745018b2e966"). InnerVolumeSpecName "kube-api-access-psw82". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:57:37 crc kubenswrapper[4783]: I1002 10:57:37.751909 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9a31888b-bb76-4ec4-91fe-745018b2e966-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9a31888b-bb76-4ec4-91fe-745018b2e966" (UID: "9a31888b-bb76-4ec4-91fe-745018b2e966"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 10:57:37 crc kubenswrapper[4783]: I1002 10:57:37.774686 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-psw82\" (UniqueName: \"kubernetes.io/projected/9a31888b-bb76-4ec4-91fe-745018b2e966-kube-api-access-psw82\") on node \"crc\" DevicePath \"\"" Oct 02 10:57:37 crc kubenswrapper[4783]: I1002 10:57:37.774719 4783 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9a31888b-bb76-4ec4-91fe-745018b2e966-utilities\") on node \"crc\" DevicePath \"\"" Oct 02 10:57:37 crc kubenswrapper[4783]: I1002 10:57:37.774728 4783 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9a31888b-bb76-4ec4-91fe-745018b2e966-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 02 10:57:38 crc kubenswrapper[4783]: I1002 10:57:38.507862 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5gvbx" event={"ID":"9a31888b-bb76-4ec4-91fe-745018b2e966","Type":"ContainerDied","Data":"043dacd7ad448c1839d8f41844f65e2272afb763818d0c5229262fb13436aa3c"} Oct 02 10:57:38 crc kubenswrapper[4783]: I1002 10:57:38.508519 4783 scope.go:117] "RemoveContainer" containerID="cd31cb0e0eefc49706d1aeaeb5cf241a4c0a6f96a9f416f1a7936821a8082fd9" Oct 02 10:57:38 crc kubenswrapper[4783]: I1002 10:57:38.507930 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5gvbx" Oct 02 10:57:38 crc kubenswrapper[4783]: I1002 10:57:38.524201 4783 scope.go:117] "RemoveContainer" containerID="c15a35e9ea4b9668914407c0770e32ffe2c0d2a2ea8e73774ab20d47860e6bb0" Oct 02 10:57:38 crc kubenswrapper[4783]: I1002 10:57:38.538558 4783 scope.go:117] "RemoveContainer" containerID="4d4e709fb0cdb005f439f2d5fcd17fdcf26b89fcf3bf1bc071baba38cf1bba9f" Oct 02 10:57:38 crc kubenswrapper[4783]: I1002 10:57:38.577941 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-5gvbx"] Oct 02 10:57:38 crc kubenswrapper[4783]: I1002 10:57:38.580890 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-5gvbx"] Oct 02 10:57:39 crc kubenswrapper[4783]: I1002 10:57:39.557289 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9a31888b-bb76-4ec4-91fe-745018b2e966" path="/var/lib/kubelet/pods/9a31888b-bb76-4ec4-91fe-745018b2e966/volumes" Oct 02 10:57:49 crc kubenswrapper[4783]: I1002 10:57:49.695205 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-97zw7"] Oct 02 10:57:49 crc kubenswrapper[4783]: I1002 10:57:49.696101 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-97zw7" podUID="b241b260-2dbd-4383-8a28-b8728e86a605" containerName="registry-server" containerID="cri-o://8cdd7352440819f6a136d35e57e776fd6a46304a63c5a3608159ac0d05e6b302" gracePeriod=30 Oct 02 10:57:49 crc kubenswrapper[4783]: I1002 10:57:49.710227 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-cs782"] Oct 02 10:57:49 crc kubenswrapper[4783]: I1002 10:57:49.710489 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-cs782" podUID="4e629356-d5af-454b-8451-c651ccf13b32" containerName="registry-server" 
containerID="cri-o://6f1c72dacf9221a8287d5e2b0392fa09c2eba3b75b0ce36872576b7f0ace58c0" gracePeriod=30 Oct 02 10:57:49 crc kubenswrapper[4783]: I1002 10:57:49.719795 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-q7g9p"] Oct 02 10:57:49 crc kubenswrapper[4783]: I1002 10:57:49.720029 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-q7g9p" podUID="6f6a533d-1bcb-409b-994e-e4ec71cffaeb" containerName="marketplace-operator" containerID="cri-o://11fe1576caa1746ba83e7ae6aa62b7f9f97aec6d6810ff990c60f7092a935171" gracePeriod=30 Oct 02 10:57:49 crc kubenswrapper[4783]: I1002 10:57:49.728675 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-h7sxv"] Oct 02 10:57:49 crc kubenswrapper[4783]: I1002 10:57:49.729013 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-h7sxv" podUID="df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850" containerName="registry-server" containerID="cri-o://c660217facfe9b4e4eafb8de09cf5194170daf7c5bbfa3c7678d344ee508d210" gracePeriod=30 Oct 02 10:57:49 crc kubenswrapper[4783]: I1002 10:57:49.742429 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-ncpxv"] Oct 02 10:57:49 crc kubenswrapper[4783]: I1002 10:57:49.742667 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-ncpxv" podUID="797d3b59-9112-4efe-bd7d-34718545bc1c" containerName="registry-server" containerID="cri-o://65bd0296c4e52c078167aabf4e8a94dcdcb21c4e64af4ca102f7f8186adc69e8" gracePeriod=30 Oct 02 10:57:49 crc kubenswrapper[4783]: I1002 10:57:49.755136 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-qxbct"] Oct 02 10:57:49 crc kubenswrapper[4783]: E1002 10:57:49.755453 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37f50271-72da-49ab-972e-ef5cb30020cc" containerName="extract-content" Oct 02 10:57:49 crc kubenswrapper[4783]: I1002 10:57:49.755470 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="37f50271-72da-49ab-972e-ef5cb30020cc" containerName="extract-content" Oct 02 10:57:49 crc kubenswrapper[4783]: E1002 10:57:49.755483 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31f92601-c54c-4f96-88db-35fe1673fe40" containerName="extract-content" Oct 02 10:57:49 crc kubenswrapper[4783]: I1002 10:57:49.755492 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="31f92601-c54c-4f96-88db-35fe1673fe40" containerName="extract-content" Oct 02 10:57:49 crc kubenswrapper[4783]: E1002 10:57:49.755506 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37f50271-72da-49ab-972e-ef5cb30020cc" containerName="extract-utilities" Oct 02 10:57:49 crc kubenswrapper[4783]: I1002 10:57:49.755514 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="37f50271-72da-49ab-972e-ef5cb30020cc" containerName="extract-utilities" Oct 02 10:57:49 crc kubenswrapper[4783]: E1002 10:57:49.755522 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="42f92d75-c0d2-4935-a411-6c892019bde0" containerName="pruner" Oct 02 10:57:49 crc kubenswrapper[4783]: I1002 10:57:49.755529 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="42f92d75-c0d2-4935-a411-6c892019bde0" containerName="pruner" Oct 02 10:57:49 crc kubenswrapper[4783]: 
E1002 10:57:49.755539 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a31888b-bb76-4ec4-91fe-745018b2e966" containerName="registry-server" Oct 02 10:57:49 crc kubenswrapper[4783]: I1002 10:57:49.755546 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a31888b-bb76-4ec4-91fe-745018b2e966" containerName="registry-server" Oct 02 10:57:49 crc kubenswrapper[4783]: E1002 10:57:49.755555 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c97a1686-3bf0-4739-a538-a191023eff38" containerName="extract-utilities" Oct 02 10:57:49 crc kubenswrapper[4783]: I1002 10:57:49.755562 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="c97a1686-3bf0-4739-a538-a191023eff38" containerName="extract-utilities" Oct 02 10:57:49 crc kubenswrapper[4783]: E1002 10:57:49.755570 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31f92601-c54c-4f96-88db-35fe1673fe40" containerName="registry-server" Oct 02 10:57:49 crc kubenswrapper[4783]: I1002 10:57:49.755577 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="31f92601-c54c-4f96-88db-35fe1673fe40" containerName="registry-server" Oct 02 10:57:49 crc kubenswrapper[4783]: E1002 10:57:49.755589 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c97a1686-3bf0-4739-a538-a191023eff38" containerName="extract-content" Oct 02 10:57:49 crc kubenswrapper[4783]: I1002 10:57:49.755596 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="c97a1686-3bf0-4739-a538-a191023eff38" containerName="extract-content" Oct 02 10:57:49 crc kubenswrapper[4783]: E1002 10:57:49.755606 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31f92601-c54c-4f96-88db-35fe1673fe40" containerName="extract-utilities" Oct 02 10:57:49 crc kubenswrapper[4783]: I1002 10:57:49.755613 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="31f92601-c54c-4f96-88db-35fe1673fe40" containerName="extract-utilities" Oct 02 10:57:49 crc kubenswrapper[4783]: E1002 10:57:49.755621 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c97a1686-3bf0-4739-a538-a191023eff38" containerName="registry-server" Oct 02 10:57:49 crc kubenswrapper[4783]: I1002 10:57:49.755628 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="c97a1686-3bf0-4739-a538-a191023eff38" containerName="registry-server" Oct 02 10:57:49 crc kubenswrapper[4783]: E1002 10:57:49.755643 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37f50271-72da-49ab-972e-ef5cb30020cc" containerName="registry-server" Oct 02 10:57:49 crc kubenswrapper[4783]: I1002 10:57:49.755649 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="37f50271-72da-49ab-972e-ef5cb30020cc" containerName="registry-server" Oct 02 10:57:49 crc kubenswrapper[4783]: E1002 10:57:49.755660 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a31888b-bb76-4ec4-91fe-745018b2e966" containerName="extract-utilities" Oct 02 10:57:49 crc kubenswrapper[4783]: I1002 10:57:49.755666 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a31888b-bb76-4ec4-91fe-745018b2e966" containerName="extract-utilities" Oct 02 10:57:49 crc kubenswrapper[4783]: E1002 10:57:49.755675 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17062cd9-24b6-489a-8474-c5e43feb3819" containerName="pruner" Oct 02 10:57:49 crc kubenswrapper[4783]: I1002 10:57:49.755683 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="17062cd9-24b6-489a-8474-c5e43feb3819" containerName="pruner" Oct 02 10:57:49 crc 
kubenswrapper[4783]: E1002 10:57:49.755691 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9a31888b-bb76-4ec4-91fe-745018b2e966" containerName="extract-content" Oct 02 10:57:49 crc kubenswrapper[4783]: I1002 10:57:49.755699 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="9a31888b-bb76-4ec4-91fe-745018b2e966" containerName="extract-content" Oct 02 10:57:49 crc kubenswrapper[4783]: I1002 10:57:49.755803 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="c97a1686-3bf0-4739-a538-a191023eff38" containerName="registry-server" Oct 02 10:57:49 crc kubenswrapper[4783]: I1002 10:57:49.755816 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="37f50271-72da-49ab-972e-ef5cb30020cc" containerName="registry-server" Oct 02 10:57:49 crc kubenswrapper[4783]: I1002 10:57:49.755829 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="9a31888b-bb76-4ec4-91fe-745018b2e966" containerName="registry-server" Oct 02 10:57:49 crc kubenswrapper[4783]: I1002 10:57:49.755840 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="42f92d75-c0d2-4935-a411-6c892019bde0" containerName="pruner" Oct 02 10:57:49 crc kubenswrapper[4783]: I1002 10:57:49.755849 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="31f92601-c54c-4f96-88db-35fe1673fe40" containerName="registry-server" Oct 02 10:57:49 crc kubenswrapper[4783]: I1002 10:57:49.755861 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="17062cd9-24b6-489a-8474-c5e43feb3819" containerName="pruner" Oct 02 10:57:49 crc kubenswrapper[4783]: I1002 10:57:49.756335 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-qxbct" Oct 02 10:57:49 crc kubenswrapper[4783]: I1002 10:57:49.772022 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-qxbct"] Oct 02 10:57:49 crc kubenswrapper[4783]: I1002 10:57:49.911760 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6f14819a-a6a1-49ff-8e2a-ba7761c8a2be-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-qxbct\" (UID: \"6f14819a-a6a1-49ff-8e2a-ba7761c8a2be\") " pod="openshift-marketplace/marketplace-operator-79b997595-qxbct" Oct 02 10:57:49 crc kubenswrapper[4783]: I1002 10:57:49.911845 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/6f14819a-a6a1-49ff-8e2a-ba7761c8a2be-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-qxbct\" (UID: \"6f14819a-a6a1-49ff-8e2a-ba7761c8a2be\") " pod="openshift-marketplace/marketplace-operator-79b997595-qxbct" Oct 02 10:57:49 crc kubenswrapper[4783]: I1002 10:57:49.911877 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zjkpf\" (UniqueName: \"kubernetes.io/projected/6f14819a-a6a1-49ff-8e2a-ba7761c8a2be-kube-api-access-zjkpf\") pod \"marketplace-operator-79b997595-qxbct\" (UID: \"6f14819a-a6a1-49ff-8e2a-ba7761c8a2be\") " pod="openshift-marketplace/marketplace-operator-79b997595-qxbct" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.013334 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/6f14819a-a6a1-49ff-8e2a-ba7761c8a2be-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-qxbct\" (UID: \"6f14819a-a6a1-49ff-8e2a-ba7761c8a2be\") " pod="openshift-marketplace/marketplace-operator-79b997595-qxbct" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.013498 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/6f14819a-a6a1-49ff-8e2a-ba7761c8a2be-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-qxbct\" (UID: \"6f14819a-a6a1-49ff-8e2a-ba7761c8a2be\") " pod="openshift-marketplace/marketplace-operator-79b997595-qxbct" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.013542 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zjkpf\" (UniqueName: \"kubernetes.io/projected/6f14819a-a6a1-49ff-8e2a-ba7761c8a2be-kube-api-access-zjkpf\") pod \"marketplace-operator-79b997595-qxbct\" (UID: \"6f14819a-a6a1-49ff-8e2a-ba7761c8a2be\") " pod="openshift-marketplace/marketplace-operator-79b997595-qxbct" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.016012 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6f14819a-a6a1-49ff-8e2a-ba7761c8a2be-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-qxbct\" (UID: \"6f14819a-a6a1-49ff-8e2a-ba7761c8a2be\") " pod="openshift-marketplace/marketplace-operator-79b997595-qxbct" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.024014 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/6f14819a-a6a1-49ff-8e2a-ba7761c8a2be-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-qxbct\" (UID: \"6f14819a-a6a1-49ff-8e2a-ba7761c8a2be\") " pod="openshift-marketplace/marketplace-operator-79b997595-qxbct" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.041327 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zjkpf\" (UniqueName: \"kubernetes.io/projected/6f14819a-a6a1-49ff-8e2a-ba7761c8a2be-kube-api-access-zjkpf\") pod \"marketplace-operator-79b997595-qxbct\" (UID: \"6f14819a-a6a1-49ff-8e2a-ba7761c8a2be\") " pod="openshift-marketplace/marketplace-operator-79b997595-qxbct" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.073268 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-qxbct" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.089637 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-cs782" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.114317 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e629356-d5af-454b-8451-c651ccf13b32-catalog-content\") pod \"4e629356-d5af-454b-8451-c651ccf13b32\" (UID: \"4e629356-d5af-454b-8451-c651ccf13b32\") " Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.114363 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fds57\" (UniqueName: \"kubernetes.io/projected/4e629356-d5af-454b-8451-c651ccf13b32-kube-api-access-fds57\") pod \"4e629356-d5af-454b-8451-c651ccf13b32\" (UID: \"4e629356-d5af-454b-8451-c651ccf13b32\") " Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.114444 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e629356-d5af-454b-8451-c651ccf13b32-utilities\") pod \"4e629356-d5af-454b-8451-c651ccf13b32\" (UID: \"4e629356-d5af-454b-8451-c651ccf13b32\") " Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.116507 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4e629356-d5af-454b-8451-c651ccf13b32-utilities" (OuterVolumeSpecName: "utilities") pod "4e629356-d5af-454b-8451-c651ccf13b32" (UID: "4e629356-d5af-454b-8451-c651ccf13b32"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.139227 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-q7g9p" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.143702 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4e629356-d5af-454b-8451-c651ccf13b32-kube-api-access-fds57" (OuterVolumeSpecName: "kube-api-access-fds57") pod "4e629356-d5af-454b-8451-c651ccf13b32" (UID: "4e629356-d5af-454b-8451-c651ccf13b32"). InnerVolumeSpecName "kube-api-access-fds57". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.151322 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-97zw7" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.216364 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/6f6a533d-1bcb-409b-994e-e4ec71cffaeb-marketplace-operator-metrics\") pod \"6f6a533d-1bcb-409b-994e-e4ec71cffaeb\" (UID: \"6f6a533d-1bcb-409b-994e-e4ec71cffaeb\") " Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.217821 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kpzxw\" (UniqueName: \"kubernetes.io/projected/b241b260-2dbd-4383-8a28-b8728e86a605-kube-api-access-kpzxw\") pod \"b241b260-2dbd-4383-8a28-b8728e86a605\" (UID: \"b241b260-2dbd-4383-8a28-b8728e86a605\") " Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.217878 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b241b260-2dbd-4383-8a28-b8728e86a605-utilities\") pod \"b241b260-2dbd-4383-8a28-b8728e86a605\" (UID: \"b241b260-2dbd-4383-8a28-b8728e86a605\") " Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.217900 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6f6a533d-1bcb-409b-994e-e4ec71cffaeb-marketplace-trusted-ca\") pod \"6f6a533d-1bcb-409b-994e-e4ec71cffaeb\" (UID: \"6f6a533d-1bcb-409b-994e-e4ec71cffaeb\") " Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.217916 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5fnmb\" (UniqueName: \"kubernetes.io/projected/6f6a533d-1bcb-409b-994e-e4ec71cffaeb-kube-api-access-5fnmb\") pod \"6f6a533d-1bcb-409b-994e-e4ec71cffaeb\" (UID: \"6f6a533d-1bcb-409b-994e-e4ec71cffaeb\") " Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.217968 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b241b260-2dbd-4383-8a28-b8728e86a605-catalog-content\") pod \"b241b260-2dbd-4383-8a28-b8728e86a605\" (UID: \"b241b260-2dbd-4383-8a28-b8728e86a605\") " Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.218102 4783 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e629356-d5af-454b-8451-c651ccf13b32-utilities\") on node \"crc\" DevicePath \"\"" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.218113 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fds57\" (UniqueName: \"kubernetes.io/projected/4e629356-d5af-454b-8451-c651ccf13b32-kube-api-access-fds57\") on node \"crc\" DevicePath \"\"" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.221045 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6f6a533d-1bcb-409b-994e-e4ec71cffaeb-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "6f6a533d-1bcb-409b-994e-e4ec71cffaeb" (UID: "6f6a533d-1bcb-409b-994e-e4ec71cffaeb"). InnerVolumeSpecName "marketplace-trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.222034 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b241b260-2dbd-4383-8a28-b8728e86a605-utilities" (OuterVolumeSpecName: "utilities") pod "b241b260-2dbd-4383-8a28-b8728e86a605" (UID: "b241b260-2dbd-4383-8a28-b8728e86a605"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.222080 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6f6a533d-1bcb-409b-994e-e4ec71cffaeb-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "6f6a533d-1bcb-409b-994e-e4ec71cffaeb" (UID: "6f6a533d-1bcb-409b-994e-e4ec71cffaeb"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.223019 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ncpxv" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.225721 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-h7sxv" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.231259 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b241b260-2dbd-4383-8a28-b8728e86a605-kube-api-access-kpzxw" (OuterVolumeSpecName: "kube-api-access-kpzxw") pod "b241b260-2dbd-4383-8a28-b8728e86a605" (UID: "b241b260-2dbd-4383-8a28-b8728e86a605"). InnerVolumeSpecName "kube-api-access-kpzxw". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.237537 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6f6a533d-1bcb-409b-994e-e4ec71cffaeb-kube-api-access-5fnmb" (OuterVolumeSpecName: "kube-api-access-5fnmb") pod "6f6a533d-1bcb-409b-994e-e4ec71cffaeb" (UID: "6f6a533d-1bcb-409b-994e-e4ec71cffaeb"). InnerVolumeSpecName "kube-api-access-5fnmb". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.278948 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4e629356-d5af-454b-8451-c651ccf13b32-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4e629356-d5af-454b-8451-c651ccf13b32" (UID: "4e629356-d5af-454b-8451-c651ccf13b32"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.321649 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850-utilities\") pod \"df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850\" (UID: \"df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850\") " Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.321688 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cjshf\" (UniqueName: \"kubernetes.io/projected/797d3b59-9112-4efe-bd7d-34718545bc1c-kube-api-access-cjshf\") pod \"797d3b59-9112-4efe-bd7d-34718545bc1c\" (UID: \"797d3b59-9112-4efe-bd7d-34718545bc1c\") " Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.321715 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/797d3b59-9112-4efe-bd7d-34718545bc1c-catalog-content\") pod \"797d3b59-9112-4efe-bd7d-34718545bc1c\" (UID: \"797d3b59-9112-4efe-bd7d-34718545bc1c\") " Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.321735 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850-catalog-content\") pod \"df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850\" (UID: \"df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850\") " Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.321771 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wzvc2\" (UniqueName: \"kubernetes.io/projected/df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850-kube-api-access-wzvc2\") pod \"df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850\" (UID: \"df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850\") " Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.321786 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/797d3b59-9112-4efe-bd7d-34718545bc1c-utilities\") pod \"797d3b59-9112-4efe-bd7d-34718545bc1c\" (UID: \"797d3b59-9112-4efe-bd7d-34718545bc1c\") " Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.321928 4783 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/6f6a533d-1bcb-409b-994e-e4ec71cffaeb-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.321941 4783 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e629356-d5af-454b-8451-c651ccf13b32-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.321950 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kpzxw\" (UniqueName: \"kubernetes.io/projected/b241b260-2dbd-4383-8a28-b8728e86a605-kube-api-access-kpzxw\") on node \"crc\" DevicePath \"\"" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.321959 4783 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b241b260-2dbd-4383-8a28-b8728e86a605-utilities\") on node \"crc\" DevicePath \"\"" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.321968 4783 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6f6a533d-1bcb-409b-994e-e4ec71cffaeb-marketplace-trusted-ca\") on node 
\"crc\" DevicePath \"\"" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.321976 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5fnmb\" (UniqueName: \"kubernetes.io/projected/6f6a533d-1bcb-409b-994e-e4ec71cffaeb-kube-api-access-5fnmb\") on node \"crc\" DevicePath \"\"" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.322608 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/797d3b59-9112-4efe-bd7d-34718545bc1c-utilities" (OuterVolumeSpecName: "utilities") pod "797d3b59-9112-4efe-bd7d-34718545bc1c" (UID: "797d3b59-9112-4efe-bd7d-34718545bc1c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.324476 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850-utilities" (OuterVolumeSpecName: "utilities") pod "df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850" (UID: "df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.332605 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850-kube-api-access-wzvc2" (OuterVolumeSpecName: "kube-api-access-wzvc2") pod "df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850" (UID: "df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850"). InnerVolumeSpecName "kube-api-access-wzvc2". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.341218 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/797d3b59-9112-4efe-bd7d-34718545bc1c-kube-api-access-cjshf" (OuterVolumeSpecName: "kube-api-access-cjshf") pod "797d3b59-9112-4efe-bd7d-34718545bc1c" (UID: "797d3b59-9112-4efe-bd7d-34718545bc1c"). InnerVolumeSpecName "kube-api-access-cjshf". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.372620 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850" (UID: "df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.384695 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b241b260-2dbd-4383-8a28-b8728e86a605-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b241b260-2dbd-4383-8a28-b8728e86a605" (UID: "b241b260-2dbd-4383-8a28-b8728e86a605"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.423130 4783 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850-utilities\") on node \"crc\" DevicePath \"\"" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.423181 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cjshf\" (UniqueName: \"kubernetes.io/projected/797d3b59-9112-4efe-bd7d-34718545bc1c-kube-api-access-cjshf\") on node \"crc\" DevicePath \"\"" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.423191 4783 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.423201 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wzvc2\" (UniqueName: \"kubernetes.io/projected/df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850-kube-api-access-wzvc2\") on node \"crc\" DevicePath \"\"" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.423211 4783 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/797d3b59-9112-4efe-bd7d-34718545bc1c-utilities\") on node \"crc\" DevicePath \"\"" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.423218 4783 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b241b260-2dbd-4383-8a28-b8728e86a605-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.561593 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/797d3b59-9112-4efe-bd7d-34718545bc1c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "797d3b59-9112-4efe-bd7d-34718545bc1c" (UID: "797d3b59-9112-4efe-bd7d-34718545bc1c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.573179 4783 generic.go:334] "Generic (PLEG): container finished" podID="4e629356-d5af-454b-8451-c651ccf13b32" containerID="6f1c72dacf9221a8287d5e2b0392fa09c2eba3b75b0ce36872576b7f0ace58c0" exitCode=0 Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.573238 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cs782" event={"ID":"4e629356-d5af-454b-8451-c651ccf13b32","Type":"ContainerDied","Data":"6f1c72dacf9221a8287d5e2b0392fa09c2eba3b75b0ce36872576b7f0ace58c0"} Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.573249 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-cs782" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.573312 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cs782" event={"ID":"4e629356-d5af-454b-8451-c651ccf13b32","Type":"ContainerDied","Data":"e0aa1b93c904f50cb41610b881fe0382e37ff0a8d6e80caebd54fc7547dd22b2"} Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.573335 4783 scope.go:117] "RemoveContainer" containerID="6f1c72dacf9221a8287d5e2b0392fa09c2eba3b75b0ce36872576b7f0ace58c0" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.576789 4783 generic.go:334] "Generic (PLEG): container finished" podID="797d3b59-9112-4efe-bd7d-34718545bc1c" containerID="65bd0296c4e52c078167aabf4e8a94dcdcb21c4e64af4ca102f7f8186adc69e8" exitCode=0 Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.576871 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ncpxv" event={"ID":"797d3b59-9112-4efe-bd7d-34718545bc1c","Type":"ContainerDied","Data":"65bd0296c4e52c078167aabf4e8a94dcdcb21c4e64af4ca102f7f8186adc69e8"} Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.576898 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-ncpxv" event={"ID":"797d3b59-9112-4efe-bd7d-34718545bc1c","Type":"ContainerDied","Data":"32c0d35537aea3c056c16c0c9f872ac53f0c41987ee6a687a1c9cbe929ea2c31"} Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.576965 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-ncpxv" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.588100 4783 generic.go:334] "Generic (PLEG): container finished" podID="b241b260-2dbd-4383-8a28-b8728e86a605" containerID="8cdd7352440819f6a136d35e57e776fd6a46304a63c5a3608159ac0d05e6b302" exitCode=0 Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.588167 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-97zw7" event={"ID":"b241b260-2dbd-4383-8a28-b8728e86a605","Type":"ContainerDied","Data":"8cdd7352440819f6a136d35e57e776fd6a46304a63c5a3608159ac0d05e6b302"} Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.588195 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-97zw7" event={"ID":"b241b260-2dbd-4383-8a28-b8728e86a605","Type":"ContainerDied","Data":"8189f44038888d6d66622e27d62c0b48887ae4e96458df2ec310aed22d0ab21e"} Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.588257 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-97zw7" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.590944 4783 generic.go:334] "Generic (PLEG): container finished" podID="6f6a533d-1bcb-409b-994e-e4ec71cffaeb" containerID="11fe1576caa1746ba83e7ae6aa62b7f9f97aec6d6810ff990c60f7092a935171" exitCode=0 Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.590994 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-q7g9p" event={"ID":"6f6a533d-1bcb-409b-994e-e4ec71cffaeb","Type":"ContainerDied","Data":"11fe1576caa1746ba83e7ae6aa62b7f9f97aec6d6810ff990c60f7092a935171"} Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.591017 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-q7g9p" event={"ID":"6f6a533d-1bcb-409b-994e-e4ec71cffaeb","Type":"ContainerDied","Data":"090fe25ee9b0bc21c0db2a68e62ab38364dc6c136e805531fc7552de70c50349"} Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.591065 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-q7g9p" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.597325 4783 scope.go:117] "RemoveContainer" containerID="5971d961ae123f8871393d051e6ccc4ea725f4ad1ea4ad089348435c939de815" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.602801 4783 generic.go:334] "Generic (PLEG): container finished" podID="df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850" containerID="c660217facfe9b4e4eafb8de09cf5194170daf7c5bbfa3c7678d344ee508d210" exitCode=0 Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.602836 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h7sxv" event={"ID":"df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850","Type":"ContainerDied","Data":"c660217facfe9b4e4eafb8de09cf5194170daf7c5bbfa3c7678d344ee508d210"} Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.602858 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h7sxv" event={"ID":"df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850","Type":"ContainerDied","Data":"1af64d36a11b2d762142e8a7d3c729809cbeb9a8bd0e69a015c1fd1372735d72"} Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.602910 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-h7sxv" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.608842 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-cs782"] Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.612139 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-cs782"] Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.621013 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-qxbct"] Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.632179 4783 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/797d3b59-9112-4efe-bd7d-34718545bc1c-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.652861 4783 scope.go:117] "RemoveContainer" containerID="86e53e1b538ef0bf6a97ec161e079f44ae16c31c0476eb3808e9c328a39b5548" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.666541 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-q7g9p"] Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.674860 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-q7g9p"] Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.678015 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-ncpxv"] Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.681454 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-ncpxv"] Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.685687 4783 scope.go:117] "RemoveContainer" containerID="6f1c72dacf9221a8287d5e2b0392fa09c2eba3b75b0ce36872576b7f0ace58c0" Oct 02 10:57:50 crc kubenswrapper[4783]: E1002 10:57:50.686616 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6f1c72dacf9221a8287d5e2b0392fa09c2eba3b75b0ce36872576b7f0ace58c0\": container with ID starting with 6f1c72dacf9221a8287d5e2b0392fa09c2eba3b75b0ce36872576b7f0ace58c0 not found: ID does not exist" containerID="6f1c72dacf9221a8287d5e2b0392fa09c2eba3b75b0ce36872576b7f0ace58c0" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.686717 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6f1c72dacf9221a8287d5e2b0392fa09c2eba3b75b0ce36872576b7f0ace58c0"} err="failed to get container status \"6f1c72dacf9221a8287d5e2b0392fa09c2eba3b75b0ce36872576b7f0ace58c0\": rpc error: code = NotFound desc = could not find container \"6f1c72dacf9221a8287d5e2b0392fa09c2eba3b75b0ce36872576b7f0ace58c0\": container with ID starting with 6f1c72dacf9221a8287d5e2b0392fa09c2eba3b75b0ce36872576b7f0ace58c0 not found: ID does not exist" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.686820 4783 scope.go:117] "RemoveContainer" containerID="5971d961ae123f8871393d051e6ccc4ea725f4ad1ea4ad089348435c939de815" Oct 02 10:57:50 crc kubenswrapper[4783]: E1002 10:57:50.687817 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5971d961ae123f8871393d051e6ccc4ea725f4ad1ea4ad089348435c939de815\": container with ID starting with 5971d961ae123f8871393d051e6ccc4ea725f4ad1ea4ad089348435c939de815 not found: ID 
does not exist" containerID="5971d961ae123f8871393d051e6ccc4ea725f4ad1ea4ad089348435c939de815" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.687900 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5971d961ae123f8871393d051e6ccc4ea725f4ad1ea4ad089348435c939de815"} err="failed to get container status \"5971d961ae123f8871393d051e6ccc4ea725f4ad1ea4ad089348435c939de815\": rpc error: code = NotFound desc = could not find container \"5971d961ae123f8871393d051e6ccc4ea725f4ad1ea4ad089348435c939de815\": container with ID starting with 5971d961ae123f8871393d051e6ccc4ea725f4ad1ea4ad089348435c939de815 not found: ID does not exist" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.687979 4783 scope.go:117] "RemoveContainer" containerID="86e53e1b538ef0bf6a97ec161e079f44ae16c31c0476eb3808e9c328a39b5548" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.688093 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-97zw7"] Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.691250 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-97zw7"] Oct 02 10:57:50 crc kubenswrapper[4783]: E1002 10:57:50.691421 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"86e53e1b538ef0bf6a97ec161e079f44ae16c31c0476eb3808e9c328a39b5548\": container with ID starting with 86e53e1b538ef0bf6a97ec161e079f44ae16c31c0476eb3808e9c328a39b5548 not found: ID does not exist" containerID="86e53e1b538ef0bf6a97ec161e079f44ae16c31c0476eb3808e9c328a39b5548" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.691439 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"86e53e1b538ef0bf6a97ec161e079f44ae16c31c0476eb3808e9c328a39b5548"} err="failed to get container status \"86e53e1b538ef0bf6a97ec161e079f44ae16c31c0476eb3808e9c328a39b5548\": rpc error: code = NotFound desc = could not find container \"86e53e1b538ef0bf6a97ec161e079f44ae16c31c0476eb3808e9c328a39b5548\": container with ID starting with 86e53e1b538ef0bf6a97ec161e079f44ae16c31c0476eb3808e9c328a39b5548 not found: ID does not exist" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.691452 4783 scope.go:117] "RemoveContainer" containerID="65bd0296c4e52c078167aabf4e8a94dcdcb21c4e64af4ca102f7f8186adc69e8" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.694355 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-h7sxv"] Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.700789 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-h7sxv"] Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.707342 4783 scope.go:117] "RemoveContainer" containerID="2c03a2c8c42fd2873a3348d646690a6332b2332d1e2f6ee70434383896826822" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.739707 4783 scope.go:117] "RemoveContainer" containerID="ccb61612401879186d0f5f7275079919f33985e349e1057fe71c9bb368fbbbf9" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.757781 4783 scope.go:117] "RemoveContainer" containerID="65bd0296c4e52c078167aabf4e8a94dcdcb21c4e64af4ca102f7f8186adc69e8" Oct 02 10:57:50 crc kubenswrapper[4783]: E1002 10:57:50.758281 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"65bd0296c4e52c078167aabf4e8a94dcdcb21c4e64af4ca102f7f8186adc69e8\": container with ID starting with 65bd0296c4e52c078167aabf4e8a94dcdcb21c4e64af4ca102f7f8186adc69e8 not found: ID does not exist" containerID="65bd0296c4e52c078167aabf4e8a94dcdcb21c4e64af4ca102f7f8186adc69e8" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.758355 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"65bd0296c4e52c078167aabf4e8a94dcdcb21c4e64af4ca102f7f8186adc69e8"} err="failed to get container status \"65bd0296c4e52c078167aabf4e8a94dcdcb21c4e64af4ca102f7f8186adc69e8\": rpc error: code = NotFound desc = could not find container \"65bd0296c4e52c078167aabf4e8a94dcdcb21c4e64af4ca102f7f8186adc69e8\": container with ID starting with 65bd0296c4e52c078167aabf4e8a94dcdcb21c4e64af4ca102f7f8186adc69e8 not found: ID does not exist" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.758375 4783 scope.go:117] "RemoveContainer" containerID="2c03a2c8c42fd2873a3348d646690a6332b2332d1e2f6ee70434383896826822" Oct 02 10:57:50 crc kubenswrapper[4783]: E1002 10:57:50.759482 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2c03a2c8c42fd2873a3348d646690a6332b2332d1e2f6ee70434383896826822\": container with ID starting with 2c03a2c8c42fd2873a3348d646690a6332b2332d1e2f6ee70434383896826822 not found: ID does not exist" containerID="2c03a2c8c42fd2873a3348d646690a6332b2332d1e2f6ee70434383896826822" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.759536 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2c03a2c8c42fd2873a3348d646690a6332b2332d1e2f6ee70434383896826822"} err="failed to get container status \"2c03a2c8c42fd2873a3348d646690a6332b2332d1e2f6ee70434383896826822\": rpc error: code = NotFound desc = could not find container \"2c03a2c8c42fd2873a3348d646690a6332b2332d1e2f6ee70434383896826822\": container with ID starting with 2c03a2c8c42fd2873a3348d646690a6332b2332d1e2f6ee70434383896826822 not found: ID does not exist" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.759559 4783 scope.go:117] "RemoveContainer" containerID="ccb61612401879186d0f5f7275079919f33985e349e1057fe71c9bb368fbbbf9" Oct 02 10:57:50 crc kubenswrapper[4783]: E1002 10:57:50.759926 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ccb61612401879186d0f5f7275079919f33985e349e1057fe71c9bb368fbbbf9\": container with ID starting with ccb61612401879186d0f5f7275079919f33985e349e1057fe71c9bb368fbbbf9 not found: ID does not exist" containerID="ccb61612401879186d0f5f7275079919f33985e349e1057fe71c9bb368fbbbf9" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.760046 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ccb61612401879186d0f5f7275079919f33985e349e1057fe71c9bb368fbbbf9"} err="failed to get container status \"ccb61612401879186d0f5f7275079919f33985e349e1057fe71c9bb368fbbbf9\": rpc error: code = NotFound desc = could not find container \"ccb61612401879186d0f5f7275079919f33985e349e1057fe71c9bb368fbbbf9\": container with ID starting with ccb61612401879186d0f5f7275079919f33985e349e1057fe71c9bb368fbbbf9 not found: ID does not exist" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.760216 4783 scope.go:117] "RemoveContainer" containerID="8cdd7352440819f6a136d35e57e776fd6a46304a63c5a3608159ac0d05e6b302" Oct 02 10:57:50 crc 
kubenswrapper[4783]: I1002 10:57:50.772338 4783 scope.go:117] "RemoveContainer" containerID="e81e4672a11f65e852fd4c30ac3737e1cf41460c8e1a25002908dea8bffcfaa8" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.785083 4783 scope.go:117] "RemoveContainer" containerID="d251e1971762f9daf7d73081d159191af52cb4d4c0c3619be4963b16eef178f7" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.798464 4783 scope.go:117] "RemoveContainer" containerID="8cdd7352440819f6a136d35e57e776fd6a46304a63c5a3608159ac0d05e6b302" Oct 02 10:57:50 crc kubenswrapper[4783]: E1002 10:57:50.798900 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8cdd7352440819f6a136d35e57e776fd6a46304a63c5a3608159ac0d05e6b302\": container with ID starting with 8cdd7352440819f6a136d35e57e776fd6a46304a63c5a3608159ac0d05e6b302 not found: ID does not exist" containerID="8cdd7352440819f6a136d35e57e776fd6a46304a63c5a3608159ac0d05e6b302" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.798951 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8cdd7352440819f6a136d35e57e776fd6a46304a63c5a3608159ac0d05e6b302"} err="failed to get container status \"8cdd7352440819f6a136d35e57e776fd6a46304a63c5a3608159ac0d05e6b302\": rpc error: code = NotFound desc = could not find container \"8cdd7352440819f6a136d35e57e776fd6a46304a63c5a3608159ac0d05e6b302\": container with ID starting with 8cdd7352440819f6a136d35e57e776fd6a46304a63c5a3608159ac0d05e6b302 not found: ID does not exist" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.798990 4783 scope.go:117] "RemoveContainer" containerID="e81e4672a11f65e852fd4c30ac3737e1cf41460c8e1a25002908dea8bffcfaa8" Oct 02 10:57:50 crc kubenswrapper[4783]: E1002 10:57:50.799458 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e81e4672a11f65e852fd4c30ac3737e1cf41460c8e1a25002908dea8bffcfaa8\": container with ID starting with e81e4672a11f65e852fd4c30ac3737e1cf41460c8e1a25002908dea8bffcfaa8 not found: ID does not exist" containerID="e81e4672a11f65e852fd4c30ac3737e1cf41460c8e1a25002908dea8bffcfaa8" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.799490 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e81e4672a11f65e852fd4c30ac3737e1cf41460c8e1a25002908dea8bffcfaa8"} err="failed to get container status \"e81e4672a11f65e852fd4c30ac3737e1cf41460c8e1a25002908dea8bffcfaa8\": rpc error: code = NotFound desc = could not find container \"e81e4672a11f65e852fd4c30ac3737e1cf41460c8e1a25002908dea8bffcfaa8\": container with ID starting with e81e4672a11f65e852fd4c30ac3737e1cf41460c8e1a25002908dea8bffcfaa8 not found: ID does not exist" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.799534 4783 scope.go:117] "RemoveContainer" containerID="d251e1971762f9daf7d73081d159191af52cb4d4c0c3619be4963b16eef178f7" Oct 02 10:57:50 crc kubenswrapper[4783]: E1002 10:57:50.800328 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d251e1971762f9daf7d73081d159191af52cb4d4c0c3619be4963b16eef178f7\": container with ID starting with d251e1971762f9daf7d73081d159191af52cb4d4c0c3619be4963b16eef178f7 not found: ID does not exist" containerID="d251e1971762f9daf7d73081d159191af52cb4d4c0c3619be4963b16eef178f7" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.800372 4783 pod_container_deletor.go:53] 
"DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d251e1971762f9daf7d73081d159191af52cb4d4c0c3619be4963b16eef178f7"} err="failed to get container status \"d251e1971762f9daf7d73081d159191af52cb4d4c0c3619be4963b16eef178f7\": rpc error: code = NotFound desc = could not find container \"d251e1971762f9daf7d73081d159191af52cb4d4c0c3619be4963b16eef178f7\": container with ID starting with d251e1971762f9daf7d73081d159191af52cb4d4c0c3619be4963b16eef178f7 not found: ID does not exist" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.800403 4783 scope.go:117] "RemoveContainer" containerID="11fe1576caa1746ba83e7ae6aa62b7f9f97aec6d6810ff990c60f7092a935171" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.811558 4783 scope.go:117] "RemoveContainer" containerID="11fe1576caa1746ba83e7ae6aa62b7f9f97aec6d6810ff990c60f7092a935171" Oct 02 10:57:50 crc kubenswrapper[4783]: E1002 10:57:50.811901 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"11fe1576caa1746ba83e7ae6aa62b7f9f97aec6d6810ff990c60f7092a935171\": container with ID starting with 11fe1576caa1746ba83e7ae6aa62b7f9f97aec6d6810ff990c60f7092a935171 not found: ID does not exist" containerID="11fe1576caa1746ba83e7ae6aa62b7f9f97aec6d6810ff990c60f7092a935171" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.811931 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"11fe1576caa1746ba83e7ae6aa62b7f9f97aec6d6810ff990c60f7092a935171"} err="failed to get container status \"11fe1576caa1746ba83e7ae6aa62b7f9f97aec6d6810ff990c60f7092a935171\": rpc error: code = NotFound desc = could not find container \"11fe1576caa1746ba83e7ae6aa62b7f9f97aec6d6810ff990c60f7092a935171\": container with ID starting with 11fe1576caa1746ba83e7ae6aa62b7f9f97aec6d6810ff990c60f7092a935171 not found: ID does not exist" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.811959 4783 scope.go:117] "RemoveContainer" containerID="c660217facfe9b4e4eafb8de09cf5194170daf7c5bbfa3c7678d344ee508d210" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.825457 4783 scope.go:117] "RemoveContainer" containerID="b5c242d4447980f08ae648f850022eb4568fd6d40bc7201f94abe3e36eeca55c" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.871124 4783 scope.go:117] "RemoveContainer" containerID="28d6fd42325bd8a75aa51b45086a3bfe9a4e0bc5181cb2946950516b1cb787a8" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.888841 4783 scope.go:117] "RemoveContainer" containerID="c660217facfe9b4e4eafb8de09cf5194170daf7c5bbfa3c7678d344ee508d210" Oct 02 10:57:50 crc kubenswrapper[4783]: E1002 10:57:50.889170 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c660217facfe9b4e4eafb8de09cf5194170daf7c5bbfa3c7678d344ee508d210\": container with ID starting with c660217facfe9b4e4eafb8de09cf5194170daf7c5bbfa3c7678d344ee508d210 not found: ID does not exist" containerID="c660217facfe9b4e4eafb8de09cf5194170daf7c5bbfa3c7678d344ee508d210" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.889208 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c660217facfe9b4e4eafb8de09cf5194170daf7c5bbfa3c7678d344ee508d210"} err="failed to get container status \"c660217facfe9b4e4eafb8de09cf5194170daf7c5bbfa3c7678d344ee508d210\": rpc error: code = NotFound desc = could not find container 
\"c660217facfe9b4e4eafb8de09cf5194170daf7c5bbfa3c7678d344ee508d210\": container with ID starting with c660217facfe9b4e4eafb8de09cf5194170daf7c5bbfa3c7678d344ee508d210 not found: ID does not exist" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.889233 4783 scope.go:117] "RemoveContainer" containerID="b5c242d4447980f08ae648f850022eb4568fd6d40bc7201f94abe3e36eeca55c" Oct 02 10:57:50 crc kubenswrapper[4783]: E1002 10:57:50.889669 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b5c242d4447980f08ae648f850022eb4568fd6d40bc7201f94abe3e36eeca55c\": container with ID starting with b5c242d4447980f08ae648f850022eb4568fd6d40bc7201f94abe3e36eeca55c not found: ID does not exist" containerID="b5c242d4447980f08ae648f850022eb4568fd6d40bc7201f94abe3e36eeca55c" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.889740 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b5c242d4447980f08ae648f850022eb4568fd6d40bc7201f94abe3e36eeca55c"} err="failed to get container status \"b5c242d4447980f08ae648f850022eb4568fd6d40bc7201f94abe3e36eeca55c\": rpc error: code = NotFound desc = could not find container \"b5c242d4447980f08ae648f850022eb4568fd6d40bc7201f94abe3e36eeca55c\": container with ID starting with b5c242d4447980f08ae648f850022eb4568fd6d40bc7201f94abe3e36eeca55c not found: ID does not exist" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.889770 4783 scope.go:117] "RemoveContainer" containerID="28d6fd42325bd8a75aa51b45086a3bfe9a4e0bc5181cb2946950516b1cb787a8" Oct 02 10:57:50 crc kubenswrapper[4783]: E1002 10:57:50.890121 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"28d6fd42325bd8a75aa51b45086a3bfe9a4e0bc5181cb2946950516b1cb787a8\": container with ID starting with 28d6fd42325bd8a75aa51b45086a3bfe9a4e0bc5181cb2946950516b1cb787a8 not found: ID does not exist" containerID="28d6fd42325bd8a75aa51b45086a3bfe9a4e0bc5181cb2946950516b1cb787a8" Oct 02 10:57:50 crc kubenswrapper[4783]: I1002 10:57:50.890152 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28d6fd42325bd8a75aa51b45086a3bfe9a4e0bc5181cb2946950516b1cb787a8"} err="failed to get container status \"28d6fd42325bd8a75aa51b45086a3bfe9a4e0bc5181cb2946950516b1cb787a8\": rpc error: code = NotFound desc = could not find container \"28d6fd42325bd8a75aa51b45086a3bfe9a4e0bc5181cb2946950516b1cb787a8\": container with ID starting with 28d6fd42325bd8a75aa51b45086a3bfe9a4e0bc5181cb2946950516b1cb787a8 not found: ID does not exist" Oct 02 10:57:51 crc kubenswrapper[4783]: I1002 10:57:51.075742 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-4cnvp"] Oct 02 10:57:51 crc kubenswrapper[4783]: I1002 10:57:51.549736 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4e629356-d5af-454b-8451-c651ccf13b32" path="/var/lib/kubelet/pods/4e629356-d5af-454b-8451-c651ccf13b32/volumes" Oct 02 10:57:51 crc kubenswrapper[4783]: I1002 10:57:51.550392 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6f6a533d-1bcb-409b-994e-e4ec71cffaeb" path="/var/lib/kubelet/pods/6f6a533d-1bcb-409b-994e-e4ec71cffaeb/volumes" Oct 02 10:57:51 crc kubenswrapper[4783]: I1002 10:57:51.550928 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="797d3b59-9112-4efe-bd7d-34718545bc1c" 
path="/var/lib/kubelet/pods/797d3b59-9112-4efe-bd7d-34718545bc1c/volumes" Oct 02 10:57:51 crc kubenswrapper[4783]: I1002 10:57:51.551926 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b241b260-2dbd-4383-8a28-b8728e86a605" path="/var/lib/kubelet/pods/b241b260-2dbd-4383-8a28-b8728e86a605/volumes" Oct 02 10:57:51 crc kubenswrapper[4783]: I1002 10:57:51.552458 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850" path="/var/lib/kubelet/pods/df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850/volumes" Oct 02 10:57:51 crc kubenswrapper[4783]: I1002 10:57:51.612239 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-qxbct" event={"ID":"6f14819a-a6a1-49ff-8e2a-ba7761c8a2be","Type":"ContainerStarted","Data":"fec41e389ab65ee473252081899afa6c37bac897f18169709ed3eb7a02c87684"} Oct 02 10:57:51 crc kubenswrapper[4783]: I1002 10:57:51.612296 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-qxbct" event={"ID":"6f14819a-a6a1-49ff-8e2a-ba7761c8a2be","Type":"ContainerStarted","Data":"416956df9af4f8e149ee93ba34032e740410a4caa6153ffb80b8a2ddaad9708e"} Oct 02 10:57:51 crc kubenswrapper[4783]: I1002 10:57:51.612656 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-qxbct" Oct 02 10:57:51 crc kubenswrapper[4783]: I1002 10:57:51.613835 4783 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-qxbct container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.54:8080/healthz\": dial tcp 10.217.0.54:8080: connect: connection refused" start-of-body= Oct 02 10:57:51 crc kubenswrapper[4783]: I1002 10:57:51.613932 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-qxbct" podUID="6f14819a-a6a1-49ff-8e2a-ba7761c8a2be" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.54:8080/healthz\": dial tcp 10.217.0.54:8080: connect: connection refused" Oct 02 10:57:51 crc kubenswrapper[4783]: I1002 10:57:51.628705 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-qxbct" podStartSLOduration=2.628687684 podStartE2EDuration="2.628687684s" podCreationTimestamp="2025-10-02 10:57:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:57:51.626261506 +0000 UTC m=+304.942455767" watchObservedRunningTime="2025-10-02 10:57:51.628687684 +0000 UTC m=+304.944881945" Oct 02 10:57:51 crc kubenswrapper[4783]: I1002 10:57:51.913616 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-lcc7z"] Oct 02 10:57:51 crc kubenswrapper[4783]: E1002 10:57:51.913796 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b241b260-2dbd-4383-8a28-b8728e86a605" containerName="extract-utilities" Oct 02 10:57:51 crc kubenswrapper[4783]: I1002 10:57:51.913807 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="b241b260-2dbd-4383-8a28-b8728e86a605" containerName="extract-utilities" Oct 02 10:57:51 crc kubenswrapper[4783]: E1002 10:57:51.913818 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850" containerName="extract-content" 
Oct 02 10:57:51 crc kubenswrapper[4783]: I1002 10:57:51.913824 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850" containerName="extract-content" Oct 02 10:57:51 crc kubenswrapper[4783]: E1002 10:57:51.913835 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e629356-d5af-454b-8451-c651ccf13b32" containerName="extract-content" Oct 02 10:57:51 crc kubenswrapper[4783]: I1002 10:57:51.913842 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e629356-d5af-454b-8451-c651ccf13b32" containerName="extract-content" Oct 02 10:57:51 crc kubenswrapper[4783]: E1002 10:57:51.913850 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850" containerName="registry-server" Oct 02 10:57:51 crc kubenswrapper[4783]: I1002 10:57:51.913856 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850" containerName="registry-server" Oct 02 10:57:51 crc kubenswrapper[4783]: E1002 10:57:51.913866 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b241b260-2dbd-4383-8a28-b8728e86a605" containerName="registry-server" Oct 02 10:57:51 crc kubenswrapper[4783]: I1002 10:57:51.913872 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="b241b260-2dbd-4383-8a28-b8728e86a605" containerName="registry-server" Oct 02 10:57:51 crc kubenswrapper[4783]: E1002 10:57:51.913880 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e629356-d5af-454b-8451-c651ccf13b32" containerName="registry-server" Oct 02 10:57:51 crc kubenswrapper[4783]: I1002 10:57:51.913885 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e629356-d5af-454b-8451-c651ccf13b32" containerName="registry-server" Oct 02 10:57:51 crc kubenswrapper[4783]: E1002 10:57:51.913893 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="797d3b59-9112-4efe-bd7d-34718545bc1c" containerName="extract-utilities" Oct 02 10:57:51 crc kubenswrapper[4783]: I1002 10:57:51.913898 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="797d3b59-9112-4efe-bd7d-34718545bc1c" containerName="extract-utilities" Oct 02 10:57:51 crc kubenswrapper[4783]: E1002 10:57:51.913906 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850" containerName="extract-utilities" Oct 02 10:57:51 crc kubenswrapper[4783]: I1002 10:57:51.913911 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850" containerName="extract-utilities" Oct 02 10:57:51 crc kubenswrapper[4783]: E1002 10:57:51.913920 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="797d3b59-9112-4efe-bd7d-34718545bc1c" containerName="registry-server" Oct 02 10:57:51 crc kubenswrapper[4783]: I1002 10:57:51.913926 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="797d3b59-9112-4efe-bd7d-34718545bc1c" containerName="registry-server" Oct 02 10:57:51 crc kubenswrapper[4783]: E1002 10:57:51.913932 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f6a533d-1bcb-409b-994e-e4ec71cffaeb" containerName="marketplace-operator" Oct 02 10:57:51 crc kubenswrapper[4783]: I1002 10:57:51.913938 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f6a533d-1bcb-409b-994e-e4ec71cffaeb" containerName="marketplace-operator" Oct 02 10:57:51 crc kubenswrapper[4783]: E1002 10:57:51.913945 4783 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="4e629356-d5af-454b-8451-c651ccf13b32" containerName="extract-utilities" Oct 02 10:57:51 crc kubenswrapper[4783]: I1002 10:57:51.913951 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e629356-d5af-454b-8451-c651ccf13b32" containerName="extract-utilities" Oct 02 10:57:51 crc kubenswrapper[4783]: E1002 10:57:51.913958 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="797d3b59-9112-4efe-bd7d-34718545bc1c" containerName="extract-content" Oct 02 10:57:51 crc kubenswrapper[4783]: I1002 10:57:51.913964 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="797d3b59-9112-4efe-bd7d-34718545bc1c" containerName="extract-content" Oct 02 10:57:51 crc kubenswrapper[4783]: E1002 10:57:51.913970 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b241b260-2dbd-4383-8a28-b8728e86a605" containerName="extract-content" Oct 02 10:57:51 crc kubenswrapper[4783]: I1002 10:57:51.913975 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="b241b260-2dbd-4383-8a28-b8728e86a605" containerName="extract-content" Oct 02 10:57:51 crc kubenswrapper[4783]: I1002 10:57:51.914050 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="4e629356-d5af-454b-8451-c651ccf13b32" containerName="registry-server" Oct 02 10:57:51 crc kubenswrapper[4783]: I1002 10:57:51.914060 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="797d3b59-9112-4efe-bd7d-34718545bc1c" containerName="registry-server" Oct 02 10:57:51 crc kubenswrapper[4783]: I1002 10:57:51.914069 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="df7d4c4d-8ca4-4ecb-aae5-8cc41f64d850" containerName="registry-server" Oct 02 10:57:51 crc kubenswrapper[4783]: I1002 10:57:51.914076 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="6f6a533d-1bcb-409b-994e-e4ec71cffaeb" containerName="marketplace-operator" Oct 02 10:57:51 crc kubenswrapper[4783]: I1002 10:57:51.914083 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="b241b260-2dbd-4383-8a28-b8728e86a605" containerName="registry-server" Oct 02 10:57:51 crc kubenswrapper[4783]: I1002 10:57:51.919042 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lcc7z" Oct 02 10:57:51 crc kubenswrapper[4783]: I1002 10:57:51.920912 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Oct 02 10:57:51 crc kubenswrapper[4783]: I1002 10:57:51.921747 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lcc7z"] Oct 02 10:57:51 crc kubenswrapper[4783]: I1002 10:57:51.947623 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gtsh9\" (UniqueName: \"kubernetes.io/projected/bcf29f25-4d45-44e2-aae1-32ec435ad6a6-kube-api-access-gtsh9\") pod \"redhat-marketplace-lcc7z\" (UID: \"bcf29f25-4d45-44e2-aae1-32ec435ad6a6\") " pod="openshift-marketplace/redhat-marketplace-lcc7z" Oct 02 10:57:51 crc kubenswrapper[4783]: I1002 10:57:51.947866 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bcf29f25-4d45-44e2-aae1-32ec435ad6a6-catalog-content\") pod \"redhat-marketplace-lcc7z\" (UID: \"bcf29f25-4d45-44e2-aae1-32ec435ad6a6\") " pod="openshift-marketplace/redhat-marketplace-lcc7z" Oct 02 10:57:51 crc kubenswrapper[4783]: I1002 10:57:51.947891 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bcf29f25-4d45-44e2-aae1-32ec435ad6a6-utilities\") pod \"redhat-marketplace-lcc7z\" (UID: \"bcf29f25-4d45-44e2-aae1-32ec435ad6a6\") " pod="openshift-marketplace/redhat-marketplace-lcc7z" Oct 02 10:57:52 crc kubenswrapper[4783]: I1002 10:57:52.048725 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gtsh9\" (UniqueName: \"kubernetes.io/projected/bcf29f25-4d45-44e2-aae1-32ec435ad6a6-kube-api-access-gtsh9\") pod \"redhat-marketplace-lcc7z\" (UID: \"bcf29f25-4d45-44e2-aae1-32ec435ad6a6\") " pod="openshift-marketplace/redhat-marketplace-lcc7z" Oct 02 10:57:52 crc kubenswrapper[4783]: I1002 10:57:52.048791 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bcf29f25-4d45-44e2-aae1-32ec435ad6a6-catalog-content\") pod \"redhat-marketplace-lcc7z\" (UID: \"bcf29f25-4d45-44e2-aae1-32ec435ad6a6\") " pod="openshift-marketplace/redhat-marketplace-lcc7z" Oct 02 10:57:52 crc kubenswrapper[4783]: I1002 10:57:52.048816 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bcf29f25-4d45-44e2-aae1-32ec435ad6a6-utilities\") pod \"redhat-marketplace-lcc7z\" (UID: \"bcf29f25-4d45-44e2-aae1-32ec435ad6a6\") " pod="openshift-marketplace/redhat-marketplace-lcc7z" Oct 02 10:57:52 crc kubenswrapper[4783]: I1002 10:57:52.049233 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bcf29f25-4d45-44e2-aae1-32ec435ad6a6-utilities\") pod \"redhat-marketplace-lcc7z\" (UID: \"bcf29f25-4d45-44e2-aae1-32ec435ad6a6\") " pod="openshift-marketplace/redhat-marketplace-lcc7z" Oct 02 10:57:52 crc kubenswrapper[4783]: I1002 10:57:52.049768 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bcf29f25-4d45-44e2-aae1-32ec435ad6a6-catalog-content\") pod \"redhat-marketplace-lcc7z\" (UID: 
\"bcf29f25-4d45-44e2-aae1-32ec435ad6a6\") " pod="openshift-marketplace/redhat-marketplace-lcc7z" Oct 02 10:57:52 crc kubenswrapper[4783]: I1002 10:57:52.068734 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gtsh9\" (UniqueName: \"kubernetes.io/projected/bcf29f25-4d45-44e2-aae1-32ec435ad6a6-kube-api-access-gtsh9\") pod \"redhat-marketplace-lcc7z\" (UID: \"bcf29f25-4d45-44e2-aae1-32ec435ad6a6\") " pod="openshift-marketplace/redhat-marketplace-lcc7z" Oct 02 10:57:52 crc kubenswrapper[4783]: I1002 10:57:52.114735 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-xqljq"] Oct 02 10:57:52 crc kubenswrapper[4783]: I1002 10:57:52.115595 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-xqljq" Oct 02 10:57:52 crc kubenswrapper[4783]: I1002 10:57:52.118606 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Oct 02 10:57:52 crc kubenswrapper[4783]: I1002 10:57:52.122051 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xqljq"] Oct 02 10:57:52 crc kubenswrapper[4783]: I1002 10:57:52.149727 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/01532edf-0ac3-455f-8915-72314d7061c1-catalog-content\") pod \"redhat-operators-xqljq\" (UID: \"01532edf-0ac3-455f-8915-72314d7061c1\") " pod="openshift-marketplace/redhat-operators-xqljq" Oct 02 10:57:52 crc kubenswrapper[4783]: I1002 10:57:52.149768 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vqccg\" (UniqueName: \"kubernetes.io/projected/01532edf-0ac3-455f-8915-72314d7061c1-kube-api-access-vqccg\") pod \"redhat-operators-xqljq\" (UID: \"01532edf-0ac3-455f-8915-72314d7061c1\") " pod="openshift-marketplace/redhat-operators-xqljq" Oct 02 10:57:52 crc kubenswrapper[4783]: I1002 10:57:52.149829 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/01532edf-0ac3-455f-8915-72314d7061c1-utilities\") pod \"redhat-operators-xqljq\" (UID: \"01532edf-0ac3-455f-8915-72314d7061c1\") " pod="openshift-marketplace/redhat-operators-xqljq" Oct 02 10:57:52 crc kubenswrapper[4783]: I1002 10:57:52.243518 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-lcc7z" Oct 02 10:57:52 crc kubenswrapper[4783]: I1002 10:57:52.250237 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/01532edf-0ac3-455f-8915-72314d7061c1-utilities\") pod \"redhat-operators-xqljq\" (UID: \"01532edf-0ac3-455f-8915-72314d7061c1\") " pod="openshift-marketplace/redhat-operators-xqljq" Oct 02 10:57:52 crc kubenswrapper[4783]: I1002 10:57:52.250388 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/01532edf-0ac3-455f-8915-72314d7061c1-catalog-content\") pod \"redhat-operators-xqljq\" (UID: \"01532edf-0ac3-455f-8915-72314d7061c1\") " pod="openshift-marketplace/redhat-operators-xqljq" Oct 02 10:57:52 crc kubenswrapper[4783]: I1002 10:57:52.250500 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vqccg\" (UniqueName: \"kubernetes.io/projected/01532edf-0ac3-455f-8915-72314d7061c1-kube-api-access-vqccg\") pod \"redhat-operators-xqljq\" (UID: \"01532edf-0ac3-455f-8915-72314d7061c1\") " pod="openshift-marketplace/redhat-operators-xqljq" Oct 02 10:57:52 crc kubenswrapper[4783]: I1002 10:57:52.250780 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/01532edf-0ac3-455f-8915-72314d7061c1-catalog-content\") pod \"redhat-operators-xqljq\" (UID: \"01532edf-0ac3-455f-8915-72314d7061c1\") " pod="openshift-marketplace/redhat-operators-xqljq" Oct 02 10:57:52 crc kubenswrapper[4783]: I1002 10:57:52.251037 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/01532edf-0ac3-455f-8915-72314d7061c1-utilities\") pod \"redhat-operators-xqljq\" (UID: \"01532edf-0ac3-455f-8915-72314d7061c1\") " pod="openshift-marketplace/redhat-operators-xqljq" Oct 02 10:57:52 crc kubenswrapper[4783]: I1002 10:57:52.275977 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vqccg\" (UniqueName: \"kubernetes.io/projected/01532edf-0ac3-455f-8915-72314d7061c1-kube-api-access-vqccg\") pod \"redhat-operators-xqljq\" (UID: \"01532edf-0ac3-455f-8915-72314d7061c1\") " pod="openshift-marketplace/redhat-operators-xqljq" Oct 02 10:57:52 crc kubenswrapper[4783]: I1002 10:57:52.420782 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-lcc7z"] Oct 02 10:57:52 crc kubenswrapper[4783]: W1002 10:57:52.430142 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbcf29f25_4d45_44e2_aae1_32ec435ad6a6.slice/crio-f7ee6a261b07bd59a1eacc1c6c6915e3eefb7afa7e7a5975bc4d9cab553ed962 WatchSource:0}: Error finding container f7ee6a261b07bd59a1eacc1c6c6915e3eefb7afa7e7a5975bc4d9cab553ed962: Status 404 returned error can't find the container with id f7ee6a261b07bd59a1eacc1c6c6915e3eefb7afa7e7a5975bc4d9cab553ed962 Oct 02 10:57:52 crc kubenswrapper[4783]: I1002 10:57:52.432108 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-xqljq" Oct 02 10:57:52 crc kubenswrapper[4783]: I1002 10:57:52.618164 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lcc7z" event={"ID":"bcf29f25-4d45-44e2-aae1-32ec435ad6a6","Type":"ContainerStarted","Data":"f7ee6a261b07bd59a1eacc1c6c6915e3eefb7afa7e7a5975bc4d9cab553ed962"} Oct 02 10:57:52 crc kubenswrapper[4783]: I1002 10:57:52.622629 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-qxbct" Oct 02 10:57:52 crc kubenswrapper[4783]: I1002 10:57:52.652104 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-xqljq"] Oct 02 10:57:53 crc kubenswrapper[4783]: I1002 10:57:53.624188 4783 generic.go:334] "Generic (PLEG): container finished" podID="bcf29f25-4d45-44e2-aae1-32ec435ad6a6" containerID="e2e53e60b7329eea466c4196ce97adea1dc25b4a2e92b2bb8e10a9c18e345210" exitCode=0 Oct 02 10:57:53 crc kubenswrapper[4783]: I1002 10:57:53.624279 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lcc7z" event={"ID":"bcf29f25-4d45-44e2-aae1-32ec435ad6a6","Type":"ContainerDied","Data":"e2e53e60b7329eea466c4196ce97adea1dc25b4a2e92b2bb8e10a9c18e345210"} Oct 02 10:57:53 crc kubenswrapper[4783]: I1002 10:57:53.626743 4783 generic.go:334] "Generic (PLEG): container finished" podID="01532edf-0ac3-455f-8915-72314d7061c1" containerID="772acf24bb8481a4baf1c6ebbf96b5abc3008c059fdc4bbc527331a295043ba0" exitCode=0 Oct 02 10:57:53 crc kubenswrapper[4783]: I1002 10:57:53.627058 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xqljq" event={"ID":"01532edf-0ac3-455f-8915-72314d7061c1","Type":"ContainerDied","Data":"772acf24bb8481a4baf1c6ebbf96b5abc3008c059fdc4bbc527331a295043ba0"} Oct 02 10:57:53 crc kubenswrapper[4783]: I1002 10:57:53.627094 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xqljq" event={"ID":"01532edf-0ac3-455f-8915-72314d7061c1","Type":"ContainerStarted","Data":"50f302cb75f0d0f70e2ef049280ccf906a83751fc3749a561bf8b65fdde41597"} Oct 02 10:57:54 crc kubenswrapper[4783]: I1002 10:57:54.313316 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-x657f"] Oct 02 10:57:54 crc kubenswrapper[4783]: I1002 10:57:54.315151 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-x657f" Oct 02 10:57:54 crc kubenswrapper[4783]: I1002 10:57:54.318092 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Oct 02 10:57:54 crc kubenswrapper[4783]: I1002 10:57:54.332228 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-x657f"] Oct 02 10:57:54 crc kubenswrapper[4783]: I1002 10:57:54.375185 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dac5b597-9c3c-414f-8f54-f36f8550d53d-catalog-content\") pod \"community-operators-x657f\" (UID: \"dac5b597-9c3c-414f-8f54-f36f8550d53d\") " pod="openshift-marketplace/community-operators-x657f" Oct 02 10:57:54 crc kubenswrapper[4783]: I1002 10:57:54.375262 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cs5cc\" (UniqueName: \"kubernetes.io/projected/dac5b597-9c3c-414f-8f54-f36f8550d53d-kube-api-access-cs5cc\") pod \"community-operators-x657f\" (UID: \"dac5b597-9c3c-414f-8f54-f36f8550d53d\") " pod="openshift-marketplace/community-operators-x657f" Oct 02 10:57:54 crc kubenswrapper[4783]: I1002 10:57:54.375292 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dac5b597-9c3c-414f-8f54-f36f8550d53d-utilities\") pod \"community-operators-x657f\" (UID: \"dac5b597-9c3c-414f-8f54-f36f8550d53d\") " pod="openshift-marketplace/community-operators-x657f" Oct 02 10:57:54 crc kubenswrapper[4783]: I1002 10:57:54.476122 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cs5cc\" (UniqueName: \"kubernetes.io/projected/dac5b597-9c3c-414f-8f54-f36f8550d53d-kube-api-access-cs5cc\") pod \"community-operators-x657f\" (UID: \"dac5b597-9c3c-414f-8f54-f36f8550d53d\") " pod="openshift-marketplace/community-operators-x657f" Oct 02 10:57:54 crc kubenswrapper[4783]: I1002 10:57:54.476174 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dac5b597-9c3c-414f-8f54-f36f8550d53d-utilities\") pod \"community-operators-x657f\" (UID: \"dac5b597-9c3c-414f-8f54-f36f8550d53d\") " pod="openshift-marketplace/community-operators-x657f" Oct 02 10:57:54 crc kubenswrapper[4783]: I1002 10:57:54.476224 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dac5b597-9c3c-414f-8f54-f36f8550d53d-catalog-content\") pod \"community-operators-x657f\" (UID: \"dac5b597-9c3c-414f-8f54-f36f8550d53d\") " pod="openshift-marketplace/community-operators-x657f" Oct 02 10:57:54 crc kubenswrapper[4783]: I1002 10:57:54.476704 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dac5b597-9c3c-414f-8f54-f36f8550d53d-catalog-content\") pod \"community-operators-x657f\" (UID: \"dac5b597-9c3c-414f-8f54-f36f8550d53d\") " pod="openshift-marketplace/community-operators-x657f" Oct 02 10:57:54 crc kubenswrapper[4783]: I1002 10:57:54.476775 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dac5b597-9c3c-414f-8f54-f36f8550d53d-utilities\") pod \"community-operators-x657f\" (UID: 
\"dac5b597-9c3c-414f-8f54-f36f8550d53d\") " pod="openshift-marketplace/community-operators-x657f" Oct 02 10:57:54 crc kubenswrapper[4783]: I1002 10:57:54.509782 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-ttd82"] Oct 02 10:57:54 crc kubenswrapper[4783]: I1002 10:57:54.510943 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ttd82" Oct 02 10:57:54 crc kubenswrapper[4783]: I1002 10:57:54.513058 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Oct 02 10:57:54 crc kubenswrapper[4783]: I1002 10:57:54.521235 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ttd82"] Oct 02 10:57:54 crc kubenswrapper[4783]: I1002 10:57:54.526826 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cs5cc\" (UniqueName: \"kubernetes.io/projected/dac5b597-9c3c-414f-8f54-f36f8550d53d-kube-api-access-cs5cc\") pod \"community-operators-x657f\" (UID: \"dac5b597-9c3c-414f-8f54-f36f8550d53d\") " pod="openshift-marketplace/community-operators-x657f" Oct 02 10:57:54 crc kubenswrapper[4783]: I1002 10:57:54.641736 4783 generic.go:334] "Generic (PLEG): container finished" podID="bcf29f25-4d45-44e2-aae1-32ec435ad6a6" containerID="d8aa6060844d011e3e9aec5fa12569aeb2291f21b20e1606eda0618b714db055" exitCode=0 Oct 02 10:57:54 crc kubenswrapper[4783]: I1002 10:57:54.641776 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lcc7z" event={"ID":"bcf29f25-4d45-44e2-aae1-32ec435ad6a6","Type":"ContainerDied","Data":"d8aa6060844d011e3e9aec5fa12569aeb2291f21b20e1606eda0618b714db055"} Oct 02 10:57:54 crc kubenswrapper[4783]: I1002 10:57:54.652733 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-x657f" Oct 02 10:57:54 crc kubenswrapper[4783]: I1002 10:57:54.679706 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29f73f22-c6f7-4508-a35a-6ff23efdaaf1-utilities\") pod \"certified-operators-ttd82\" (UID: \"29f73f22-c6f7-4508-a35a-6ff23efdaaf1\") " pod="openshift-marketplace/certified-operators-ttd82" Oct 02 10:57:54 crc kubenswrapper[4783]: I1002 10:57:54.679749 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29f73f22-c6f7-4508-a35a-6ff23efdaaf1-catalog-content\") pod \"certified-operators-ttd82\" (UID: \"29f73f22-c6f7-4508-a35a-6ff23efdaaf1\") " pod="openshift-marketplace/certified-operators-ttd82" Oct 02 10:57:54 crc kubenswrapper[4783]: I1002 10:57:54.679800 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rwrn4\" (UniqueName: \"kubernetes.io/projected/29f73f22-c6f7-4508-a35a-6ff23efdaaf1-kube-api-access-rwrn4\") pod \"certified-operators-ttd82\" (UID: \"29f73f22-c6f7-4508-a35a-6ff23efdaaf1\") " pod="openshift-marketplace/certified-operators-ttd82" Oct 02 10:57:54 crc kubenswrapper[4783]: I1002 10:57:54.780440 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29f73f22-c6f7-4508-a35a-6ff23efdaaf1-utilities\") pod \"certified-operators-ttd82\" (UID: \"29f73f22-c6f7-4508-a35a-6ff23efdaaf1\") " pod="openshift-marketplace/certified-operators-ttd82" Oct 02 10:57:54 crc kubenswrapper[4783]: I1002 10:57:54.780769 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29f73f22-c6f7-4508-a35a-6ff23efdaaf1-catalog-content\") pod \"certified-operators-ttd82\" (UID: \"29f73f22-c6f7-4508-a35a-6ff23efdaaf1\") " pod="openshift-marketplace/certified-operators-ttd82" Oct 02 10:57:54 crc kubenswrapper[4783]: I1002 10:57:54.780818 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rwrn4\" (UniqueName: \"kubernetes.io/projected/29f73f22-c6f7-4508-a35a-6ff23efdaaf1-kube-api-access-rwrn4\") pod \"certified-operators-ttd82\" (UID: \"29f73f22-c6f7-4508-a35a-6ff23efdaaf1\") " pod="openshift-marketplace/certified-operators-ttd82" Oct 02 10:57:54 crc kubenswrapper[4783]: I1002 10:57:54.780953 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29f73f22-c6f7-4508-a35a-6ff23efdaaf1-utilities\") pod \"certified-operators-ttd82\" (UID: \"29f73f22-c6f7-4508-a35a-6ff23efdaaf1\") " pod="openshift-marketplace/certified-operators-ttd82" Oct 02 10:57:54 crc kubenswrapper[4783]: I1002 10:57:54.781186 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29f73f22-c6f7-4508-a35a-6ff23efdaaf1-catalog-content\") pod \"certified-operators-ttd82\" (UID: \"29f73f22-c6f7-4508-a35a-6ff23efdaaf1\") " pod="openshift-marketplace/certified-operators-ttd82" Oct 02 10:57:54 crc kubenswrapper[4783]: I1002 10:57:54.800779 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rwrn4\" (UniqueName: \"kubernetes.io/projected/29f73f22-c6f7-4508-a35a-6ff23efdaaf1-kube-api-access-rwrn4\") pod 
\"certified-operators-ttd82\" (UID: \"29f73f22-c6f7-4508-a35a-6ff23efdaaf1\") " pod="openshift-marketplace/certified-operators-ttd82" Oct 02 10:57:54 crc kubenswrapper[4783]: I1002 10:57:54.847204 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-x657f"] Oct 02 10:57:54 crc kubenswrapper[4783]: I1002 10:57:54.851745 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ttd82" Oct 02 10:57:54 crc kubenswrapper[4783]: W1002 10:57:54.856297 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddac5b597_9c3c_414f_8f54_f36f8550d53d.slice/crio-431c626a1af79cdbe3dad7be3aac525882f2c28f0aa4e766f8258a5b7a21ad63 WatchSource:0}: Error finding container 431c626a1af79cdbe3dad7be3aac525882f2c28f0aa4e766f8258a5b7a21ad63: Status 404 returned error can't find the container with id 431c626a1af79cdbe3dad7be3aac525882f2c28f0aa4e766f8258a5b7a21ad63 Oct 02 10:57:55 crc kubenswrapper[4783]: I1002 10:57:55.242647 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ttd82"] Oct 02 10:57:55 crc kubenswrapper[4783]: W1002 10:57:55.246280 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod29f73f22_c6f7_4508_a35a_6ff23efdaaf1.slice/crio-8bdf8043c5e0e2af7ec0e38777468f7d4adb7e23cb08acc270a3dca749fcef37 WatchSource:0}: Error finding container 8bdf8043c5e0e2af7ec0e38777468f7d4adb7e23cb08acc270a3dca749fcef37: Status 404 returned error can't find the container with id 8bdf8043c5e0e2af7ec0e38777468f7d4adb7e23cb08acc270a3dca749fcef37 Oct 02 10:57:55 crc kubenswrapper[4783]: I1002 10:57:55.649982 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-lcc7z" event={"ID":"bcf29f25-4d45-44e2-aae1-32ec435ad6a6","Type":"ContainerStarted","Data":"65ac0c5aab5a48a72a9276d3f8070736e431ef52d4c8b4262507b78489f83b5f"} Oct 02 10:57:55 crc kubenswrapper[4783]: I1002 10:57:55.651195 4783 generic.go:334] "Generic (PLEG): container finished" podID="01532edf-0ac3-455f-8915-72314d7061c1" containerID="77d989c6cf236cde33f33a7051947d25ccd9209da36e577defa7ab4f630ac5f9" exitCode=0 Oct 02 10:57:55 crc kubenswrapper[4783]: I1002 10:57:55.651251 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xqljq" event={"ID":"01532edf-0ac3-455f-8915-72314d7061c1","Type":"ContainerDied","Data":"77d989c6cf236cde33f33a7051947d25ccd9209da36e577defa7ab4f630ac5f9"} Oct 02 10:57:55 crc kubenswrapper[4783]: I1002 10:57:55.653245 4783 generic.go:334] "Generic (PLEG): container finished" podID="29f73f22-c6f7-4508-a35a-6ff23efdaaf1" containerID="f718d554e1568c2125d257dedebc38d4186dbf2164a01a097da985089b28032e" exitCode=0 Oct 02 10:57:55 crc kubenswrapper[4783]: I1002 10:57:55.653301 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ttd82" event={"ID":"29f73f22-c6f7-4508-a35a-6ff23efdaaf1","Type":"ContainerDied","Data":"f718d554e1568c2125d257dedebc38d4186dbf2164a01a097da985089b28032e"} Oct 02 10:57:55 crc kubenswrapper[4783]: I1002 10:57:55.653323 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ttd82" event={"ID":"29f73f22-c6f7-4508-a35a-6ff23efdaaf1","Type":"ContainerStarted","Data":"8bdf8043c5e0e2af7ec0e38777468f7d4adb7e23cb08acc270a3dca749fcef37"} Oct 
02 10:57:55 crc kubenswrapper[4783]: I1002 10:57:55.656245 4783 generic.go:334] "Generic (PLEG): container finished" podID="dac5b597-9c3c-414f-8f54-f36f8550d53d" containerID="ef2fc70e90e9f7589cd30d8914584d8ced689656d8f05db157a06150150fa4fa" exitCode=0 Oct 02 10:57:55 crc kubenswrapper[4783]: I1002 10:57:55.656291 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x657f" event={"ID":"dac5b597-9c3c-414f-8f54-f36f8550d53d","Type":"ContainerDied","Data":"ef2fc70e90e9f7589cd30d8914584d8ced689656d8f05db157a06150150fa4fa"} Oct 02 10:57:55 crc kubenswrapper[4783]: I1002 10:57:55.656321 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x657f" event={"ID":"dac5b597-9c3c-414f-8f54-f36f8550d53d","Type":"ContainerStarted","Data":"431c626a1af79cdbe3dad7be3aac525882f2c28f0aa4e766f8258a5b7a21ad63"} Oct 02 10:57:55 crc kubenswrapper[4783]: I1002 10:57:55.667185 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-lcc7z" podStartSLOduration=2.951085768 podStartE2EDuration="4.66716783s" podCreationTimestamp="2025-10-02 10:57:51 +0000 UTC" firstStartedPulling="2025-10-02 10:57:53.62562753 +0000 UTC m=+306.941821791" lastFinishedPulling="2025-10-02 10:57:55.341709592 +0000 UTC m=+308.657903853" observedRunningTime="2025-10-02 10:57:55.666361884 +0000 UTC m=+308.982556145" watchObservedRunningTime="2025-10-02 10:57:55.66716783 +0000 UTC m=+308.983362091" Oct 02 10:57:56 crc kubenswrapper[4783]: I1002 10:57:56.662553 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-xqljq" event={"ID":"01532edf-0ac3-455f-8915-72314d7061c1","Type":"ContainerStarted","Data":"6a8ac4a10b228441f6e7e703c262a4e65f6d3a09ca2f88bd80b46c18a53caf86"} Oct 02 10:57:56 crc kubenswrapper[4783]: I1002 10:57:56.666762 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x657f" event={"ID":"dac5b597-9c3c-414f-8f54-f36f8550d53d","Type":"ContainerStarted","Data":"adcd770ea09060495e3618bd3a520f3b8680eac03d298b5c67952b3b5768fdfc"} Oct 02 10:57:56 crc kubenswrapper[4783]: I1002 10:57:56.697469 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-xqljq" podStartSLOduration=2.201814529 podStartE2EDuration="4.697451497s" podCreationTimestamp="2025-10-02 10:57:52 +0000 UTC" firstStartedPulling="2025-10-02 10:57:53.627969766 +0000 UTC m=+306.944164027" lastFinishedPulling="2025-10-02 10:57:56.123606744 +0000 UTC m=+309.439800995" observedRunningTime="2025-10-02 10:57:56.678621887 +0000 UTC m=+309.994816148" watchObservedRunningTime="2025-10-02 10:57:56.697451497 +0000 UTC m=+310.013645758" Oct 02 10:57:57 crc kubenswrapper[4783]: I1002 10:57:57.673557 4783 generic.go:334] "Generic (PLEG): container finished" podID="dac5b597-9c3c-414f-8f54-f36f8550d53d" containerID="adcd770ea09060495e3618bd3a520f3b8680eac03d298b5c67952b3b5768fdfc" exitCode=0 Oct 02 10:57:57 crc kubenswrapper[4783]: I1002 10:57:57.673891 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x657f" event={"ID":"dac5b597-9c3c-414f-8f54-f36f8550d53d","Type":"ContainerDied","Data":"adcd770ea09060495e3618bd3a520f3b8680eac03d298b5c67952b3b5768fdfc"} Oct 02 10:57:58 crc kubenswrapper[4783]: I1002 10:57:58.680373 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-x657f" 
event={"ID":"dac5b597-9c3c-414f-8f54-f36f8550d53d","Type":"ContainerStarted","Data":"6bef5117651c3672dfbf44a3256bfa6a625152fc8192a1ef0700d6a6f0914241"} Oct 02 10:57:58 crc kubenswrapper[4783]: I1002 10:57:58.682864 4783 generic.go:334] "Generic (PLEG): container finished" podID="29f73f22-c6f7-4508-a35a-6ff23efdaaf1" containerID="593e0cb4ec9dc61f7e8becdf621fddb468a50c0effdbd9b738de21433a1a2835" exitCode=0 Oct 02 10:57:58 crc kubenswrapper[4783]: I1002 10:57:58.682907 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ttd82" event={"ID":"29f73f22-c6f7-4508-a35a-6ff23efdaaf1","Type":"ContainerDied","Data":"593e0cb4ec9dc61f7e8becdf621fddb468a50c0effdbd9b738de21433a1a2835"} Oct 02 10:57:58 crc kubenswrapper[4783]: I1002 10:57:58.701623 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-x657f" podStartSLOduration=2.169054805 podStartE2EDuration="4.701608887s" podCreationTimestamp="2025-10-02 10:57:54 +0000 UTC" firstStartedPulling="2025-10-02 10:57:55.657800447 +0000 UTC m=+308.973994708" lastFinishedPulling="2025-10-02 10:57:58.190354529 +0000 UTC m=+311.506548790" observedRunningTime="2025-10-02 10:57:58.697947768 +0000 UTC m=+312.014142029" watchObservedRunningTime="2025-10-02 10:57:58.701608887 +0000 UTC m=+312.017803148" Oct 02 10:57:59 crc kubenswrapper[4783]: I1002 10:57:59.690724 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ttd82" event={"ID":"29f73f22-c6f7-4508-a35a-6ff23efdaaf1","Type":"ContainerStarted","Data":"cea9678e16c6bd0838125cd3161b8616f3b4c267f840d30ae963e6f75ebac979"} Oct 02 10:57:59 crc kubenswrapper[4783]: I1002 10:57:59.719477 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-ttd82" podStartSLOduration=2.188076339 podStartE2EDuration="5.719459281s" podCreationTimestamp="2025-10-02 10:57:54 +0000 UTC" firstStartedPulling="2025-10-02 10:57:55.654535811 +0000 UTC m=+308.970730072" lastFinishedPulling="2025-10-02 10:57:59.185918753 +0000 UTC m=+312.502113014" observedRunningTime="2025-10-02 10:57:59.717440186 +0000 UTC m=+313.033634447" watchObservedRunningTime="2025-10-02 10:57:59.719459281 +0000 UTC m=+313.035653562" Oct 02 10:58:02 crc kubenswrapper[4783]: I1002 10:58:02.244636 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-lcc7z" Oct 02 10:58:02 crc kubenswrapper[4783]: I1002 10:58:02.244945 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-lcc7z" Oct 02 10:58:02 crc kubenswrapper[4783]: I1002 10:58:02.286250 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-lcc7z" Oct 02 10:58:02 crc kubenswrapper[4783]: I1002 10:58:02.432906 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-xqljq" Oct 02 10:58:02 crc kubenswrapper[4783]: I1002 10:58:02.433504 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-xqljq" Oct 02 10:58:02 crc kubenswrapper[4783]: I1002 10:58:02.477114 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-xqljq" Oct 02 10:58:02 crc kubenswrapper[4783]: I1002 10:58:02.750211 4783 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-xqljq" Oct 02 10:58:02 crc kubenswrapper[4783]: I1002 10:58:02.754540 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-lcc7z" Oct 02 10:58:04 crc kubenswrapper[4783]: I1002 10:58:04.653571 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-x657f" Oct 02 10:58:04 crc kubenswrapper[4783]: I1002 10:58:04.653680 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-x657f" Oct 02 10:58:04 crc kubenswrapper[4783]: I1002 10:58:04.700160 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-x657f" Oct 02 10:58:04 crc kubenswrapper[4783]: I1002 10:58:04.760878 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-x657f" Oct 02 10:58:04 crc kubenswrapper[4783]: I1002 10:58:04.852390 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-ttd82" Oct 02 10:58:04 crc kubenswrapper[4783]: I1002 10:58:04.852441 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-ttd82" Oct 02 10:58:04 crc kubenswrapper[4783]: I1002 10:58:04.886343 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-ttd82" Oct 02 10:58:05 crc kubenswrapper[4783]: I1002 10:58:05.764498 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-ttd82" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.098660 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" podUID="01de2d55-d330-461b-b801-0bfe3078baab" containerName="oauth-openshift" containerID="cri-o://c1ddb6806afb1fc7ddeeead147df1e0c7be6ab93e41c4abdcd90556a635c7b67" gracePeriod=15 Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.447279 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.456655 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-system-cliconfig\") pod \"01de2d55-d330-461b-b801-0bfe3078baab\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.456722 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-system-router-certs\") pod \"01de2d55-d330-461b-b801-0bfe3078baab\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.456744 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-system-session\") pod \"01de2d55-d330-461b-b801-0bfe3078baab\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.456780 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-user-template-error\") pod \"01de2d55-d330-461b-b801-0bfe3078baab\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.456807 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/01de2d55-d330-461b-b801-0bfe3078baab-audit-dir\") pod \"01de2d55-d330-461b-b801-0bfe3078baab\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.456846 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-system-serving-cert\") pod \"01de2d55-d330-461b-b801-0bfe3078baab\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.456881 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-system-trusted-ca-bundle\") pod \"01de2d55-d330-461b-b801-0bfe3078baab\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.456896 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-user-template-login\") pod \"01de2d55-d330-461b-b801-0bfe3078baab\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.456912 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vgspg\" (UniqueName: \"kubernetes.io/projected/01de2d55-d330-461b-b801-0bfe3078baab-kube-api-access-vgspg\") pod \"01de2d55-d330-461b-b801-0bfe3078baab\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 
10:58:16.456932 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-user-template-provider-selection\") pod \"01de2d55-d330-461b-b801-0bfe3078baab\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.456952 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-user-idp-0-file-data\") pod \"01de2d55-d330-461b-b801-0bfe3078baab\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.456976 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/01de2d55-d330-461b-b801-0bfe3078baab-audit-policies\") pod \"01de2d55-d330-461b-b801-0bfe3078baab\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.457003 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-system-ocp-branding-template\") pod \"01de2d55-d330-461b-b801-0bfe3078baab\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.457022 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-system-service-ca\") pod \"01de2d55-d330-461b-b801-0bfe3078baab\" (UID: \"01de2d55-d330-461b-b801-0bfe3078baab\") " Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.457275 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "01de2d55-d330-461b-b801-0bfe3078baab" (UID: "01de2d55-d330-461b-b801-0bfe3078baab"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.457601 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "01de2d55-d330-461b-b801-0bfe3078baab" (UID: "01de2d55-d330-461b-b801-0bfe3078baab"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.457618 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01de2d55-d330-461b-b801-0bfe3078baab-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "01de2d55-d330-461b-b801-0bfe3078baab" (UID: "01de2d55-d330-461b-b801-0bfe3078baab"). InnerVolumeSpecName "audit-policies". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.458212 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "01de2d55-d330-461b-b801-0bfe3078baab" (UID: "01de2d55-d330-461b-b801-0bfe3078baab"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.458263 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/01de2d55-d330-461b-b801-0bfe3078baab-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "01de2d55-d330-461b-b801-0bfe3078baab" (UID: "01de2d55-d330-461b-b801-0bfe3078baab"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.463336 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "01de2d55-d330-461b-b801-0bfe3078baab" (UID: "01de2d55-d330-461b-b801-0bfe3078baab"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.463803 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "01de2d55-d330-461b-b801-0bfe3078baab" (UID: "01de2d55-d330-461b-b801-0bfe3078baab"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.464080 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "01de2d55-d330-461b-b801-0bfe3078baab" (UID: "01de2d55-d330-461b-b801-0bfe3078baab"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.464884 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "01de2d55-d330-461b-b801-0bfe3078baab" (UID: "01de2d55-d330-461b-b801-0bfe3078baab"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.467907 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "01de2d55-d330-461b-b801-0bfe3078baab" (UID: "01de2d55-d330-461b-b801-0bfe3078baab"). InnerVolumeSpecName "v4-0-config-system-session". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.477760 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "01de2d55-d330-461b-b801-0bfe3078baab" (UID: "01de2d55-d330-461b-b801-0bfe3078baab"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.478111 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01de2d55-d330-461b-b801-0bfe3078baab-kube-api-access-vgspg" (OuterVolumeSpecName: "kube-api-access-vgspg") pod "01de2d55-d330-461b-b801-0bfe3078baab" (UID: "01de2d55-d330-461b-b801-0bfe3078baab"). InnerVolumeSpecName "kube-api-access-vgspg". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.478900 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "01de2d55-d330-461b-b801-0bfe3078baab" (UID: "01de2d55-d330-461b-b801-0bfe3078baab"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.494903 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-55c8c74798-mhwv9"] Oct 02 10:58:16 crc kubenswrapper[4783]: E1002 10:58:16.495182 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="01de2d55-d330-461b-b801-0bfe3078baab" containerName="oauth-openshift" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.495197 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="01de2d55-d330-461b-b801-0bfe3078baab" containerName="oauth-openshift" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.495329 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="01de2d55-d330-461b-b801-0bfe3078baab" containerName="oauth-openshift" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.495853 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-55c8c74798-mhwv9" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.497002 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "01de2d55-d330-461b-b801-0bfe3078baab" (UID: "01de2d55-d330-461b-b801-0bfe3078baab"). InnerVolumeSpecName "v4-0-config-system-router-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.505627 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-55c8c74798-mhwv9"] Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.557671 4783 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.557920 4783 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.558010 4783 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.558094 4783 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.558184 4783 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/01de2d55-d330-461b-b801-0bfe3078baab-audit-dir\") on node \"crc\" DevicePath \"\"" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.558257 4783 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.558326 4783 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.558393 4783 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.558529 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vgspg\" (UniqueName: \"kubernetes.io/projected/01de2d55-d330-461b-b801-0bfe3078baab-kube-api-access-vgspg\") on node \"crc\" DevicePath \"\"" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.558628 4783 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.558667 4783 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 
10:58:16.558690 4783 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/01de2d55-d330-461b-b801-0bfe3078baab-audit-policies\") on node \"crc\" DevicePath \"\"" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.558711 4783 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.558996 4783 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/01de2d55-d330-461b-b801-0bfe3078baab-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.659827 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/4f45009b-78a2-4b42-882c-bcb88825f343-v4-0-config-system-serving-cert\") pod \"oauth-openshift-55c8c74798-mhwv9\" (UID: \"4f45009b-78a2-4b42-882c-bcb88825f343\") " pod="openshift-authentication/oauth-openshift-55c8c74798-mhwv9" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.659922 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4f45009b-78a2-4b42-882c-bcb88825f343-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-55c8c74798-mhwv9\" (UID: \"4f45009b-78a2-4b42-882c-bcb88825f343\") " pod="openshift-authentication/oauth-openshift-55c8c74798-mhwv9" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.660401 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/4f45009b-78a2-4b42-882c-bcb88825f343-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-55c8c74798-mhwv9\" (UID: \"4f45009b-78a2-4b42-882c-bcb88825f343\") " pod="openshift-authentication/oauth-openshift-55c8c74798-mhwv9" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.660564 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/4f45009b-78a2-4b42-882c-bcb88825f343-v4-0-config-user-template-error\") pod \"oauth-openshift-55c8c74798-mhwv9\" (UID: \"4f45009b-78a2-4b42-882c-bcb88825f343\") " pod="openshift-authentication/oauth-openshift-55c8c74798-mhwv9" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.660691 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/4f45009b-78a2-4b42-882c-bcb88825f343-v4-0-config-user-template-login\") pod \"oauth-openshift-55c8c74798-mhwv9\" (UID: \"4f45009b-78a2-4b42-882c-bcb88825f343\") " pod="openshift-authentication/oauth-openshift-55c8c74798-mhwv9" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.660857 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/4f45009b-78a2-4b42-882c-bcb88825f343-audit-dir\") pod \"oauth-openshift-55c8c74798-mhwv9\" (UID: \"4f45009b-78a2-4b42-882c-bcb88825f343\") " 
pod="openshift-authentication/oauth-openshift-55c8c74798-mhwv9" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.660941 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/4f45009b-78a2-4b42-882c-bcb88825f343-v4-0-config-system-session\") pod \"oauth-openshift-55c8c74798-mhwv9\" (UID: \"4f45009b-78a2-4b42-882c-bcb88825f343\") " pod="openshift-authentication/oauth-openshift-55c8c74798-mhwv9" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.661026 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/4f45009b-78a2-4b42-882c-bcb88825f343-v4-0-config-system-router-certs\") pod \"oauth-openshift-55c8c74798-mhwv9\" (UID: \"4f45009b-78a2-4b42-882c-bcb88825f343\") " pod="openshift-authentication/oauth-openshift-55c8c74798-mhwv9" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.661112 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/4f45009b-78a2-4b42-882c-bcb88825f343-v4-0-config-system-cliconfig\") pod \"oauth-openshift-55c8c74798-mhwv9\" (UID: \"4f45009b-78a2-4b42-882c-bcb88825f343\") " pod="openshift-authentication/oauth-openshift-55c8c74798-mhwv9" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.661194 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/4f45009b-78a2-4b42-882c-bcb88825f343-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-55c8c74798-mhwv9\" (UID: \"4f45009b-78a2-4b42-882c-bcb88825f343\") " pod="openshift-authentication/oauth-openshift-55c8c74798-mhwv9" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.661273 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gp8r8\" (UniqueName: \"kubernetes.io/projected/4f45009b-78a2-4b42-882c-bcb88825f343-kube-api-access-gp8r8\") pod \"oauth-openshift-55c8c74798-mhwv9\" (UID: \"4f45009b-78a2-4b42-882c-bcb88825f343\") " pod="openshift-authentication/oauth-openshift-55c8c74798-mhwv9" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.661379 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/4f45009b-78a2-4b42-882c-bcb88825f343-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-55c8c74798-mhwv9\" (UID: \"4f45009b-78a2-4b42-882c-bcb88825f343\") " pod="openshift-authentication/oauth-openshift-55c8c74798-mhwv9" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.661555 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/4f45009b-78a2-4b42-882c-bcb88825f343-audit-policies\") pod \"oauth-openshift-55c8c74798-mhwv9\" (UID: \"4f45009b-78a2-4b42-882c-bcb88825f343\") " pod="openshift-authentication/oauth-openshift-55c8c74798-mhwv9" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.661664 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: 
\"kubernetes.io/configmap/4f45009b-78a2-4b42-882c-bcb88825f343-v4-0-config-system-service-ca\") pod \"oauth-openshift-55c8c74798-mhwv9\" (UID: \"4f45009b-78a2-4b42-882c-bcb88825f343\") " pod="openshift-authentication/oauth-openshift-55c8c74798-mhwv9" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.762398 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4f45009b-78a2-4b42-882c-bcb88825f343-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-55c8c74798-mhwv9\" (UID: \"4f45009b-78a2-4b42-882c-bcb88825f343\") " pod="openshift-authentication/oauth-openshift-55c8c74798-mhwv9" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.762495 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/4f45009b-78a2-4b42-882c-bcb88825f343-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-55c8c74798-mhwv9\" (UID: \"4f45009b-78a2-4b42-882c-bcb88825f343\") " pod="openshift-authentication/oauth-openshift-55c8c74798-mhwv9" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.762538 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/4f45009b-78a2-4b42-882c-bcb88825f343-v4-0-config-user-template-error\") pod \"oauth-openshift-55c8c74798-mhwv9\" (UID: \"4f45009b-78a2-4b42-882c-bcb88825f343\") " pod="openshift-authentication/oauth-openshift-55c8c74798-mhwv9" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.762585 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/4f45009b-78a2-4b42-882c-bcb88825f343-v4-0-config-user-template-login\") pod \"oauth-openshift-55c8c74798-mhwv9\" (UID: \"4f45009b-78a2-4b42-882c-bcb88825f343\") " pod="openshift-authentication/oauth-openshift-55c8c74798-mhwv9" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.762642 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/4f45009b-78a2-4b42-882c-bcb88825f343-audit-dir\") pod \"oauth-openshift-55c8c74798-mhwv9\" (UID: \"4f45009b-78a2-4b42-882c-bcb88825f343\") " pod="openshift-authentication/oauth-openshift-55c8c74798-mhwv9" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.762672 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/4f45009b-78a2-4b42-882c-bcb88825f343-v4-0-config-system-session\") pod \"oauth-openshift-55c8c74798-mhwv9\" (UID: \"4f45009b-78a2-4b42-882c-bcb88825f343\") " pod="openshift-authentication/oauth-openshift-55c8c74798-mhwv9" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.762708 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/4f45009b-78a2-4b42-882c-bcb88825f343-v4-0-config-system-router-certs\") pod \"oauth-openshift-55c8c74798-mhwv9\" (UID: \"4f45009b-78a2-4b42-882c-bcb88825f343\") " pod="openshift-authentication/oauth-openshift-55c8c74798-mhwv9" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.762741 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: 
\"kubernetes.io/configmap/4f45009b-78a2-4b42-882c-bcb88825f343-v4-0-config-system-cliconfig\") pod \"oauth-openshift-55c8c74798-mhwv9\" (UID: \"4f45009b-78a2-4b42-882c-bcb88825f343\") " pod="openshift-authentication/oauth-openshift-55c8c74798-mhwv9" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.762772 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/4f45009b-78a2-4b42-882c-bcb88825f343-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-55c8c74798-mhwv9\" (UID: \"4f45009b-78a2-4b42-882c-bcb88825f343\") " pod="openshift-authentication/oauth-openshift-55c8c74798-mhwv9" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.762803 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gp8r8\" (UniqueName: \"kubernetes.io/projected/4f45009b-78a2-4b42-882c-bcb88825f343-kube-api-access-gp8r8\") pod \"oauth-openshift-55c8c74798-mhwv9\" (UID: \"4f45009b-78a2-4b42-882c-bcb88825f343\") " pod="openshift-authentication/oauth-openshift-55c8c74798-mhwv9" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.762835 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/4f45009b-78a2-4b42-882c-bcb88825f343-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-55c8c74798-mhwv9\" (UID: \"4f45009b-78a2-4b42-882c-bcb88825f343\") " pod="openshift-authentication/oauth-openshift-55c8c74798-mhwv9" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.762868 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/4f45009b-78a2-4b42-882c-bcb88825f343-audit-policies\") pod \"oauth-openshift-55c8c74798-mhwv9\" (UID: \"4f45009b-78a2-4b42-882c-bcb88825f343\") " pod="openshift-authentication/oauth-openshift-55c8c74798-mhwv9" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.762890 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/4f45009b-78a2-4b42-882c-bcb88825f343-v4-0-config-system-service-ca\") pod \"oauth-openshift-55c8c74798-mhwv9\" (UID: \"4f45009b-78a2-4b42-882c-bcb88825f343\") " pod="openshift-authentication/oauth-openshift-55c8c74798-mhwv9" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.762932 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/4f45009b-78a2-4b42-882c-bcb88825f343-v4-0-config-system-serving-cert\") pod \"oauth-openshift-55c8c74798-mhwv9\" (UID: \"4f45009b-78a2-4b42-882c-bcb88825f343\") " pod="openshift-authentication/oauth-openshift-55c8c74798-mhwv9" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.762824 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/4f45009b-78a2-4b42-882c-bcb88825f343-audit-dir\") pod \"oauth-openshift-55c8c74798-mhwv9\" (UID: \"4f45009b-78a2-4b42-882c-bcb88825f343\") " pod="openshift-authentication/oauth-openshift-55c8c74798-mhwv9" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.764972 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/4f45009b-78a2-4b42-882c-bcb88825f343-audit-policies\") pod 
\"oauth-openshift-55c8c74798-mhwv9\" (UID: \"4f45009b-78a2-4b42-882c-bcb88825f343\") " pod="openshift-authentication/oauth-openshift-55c8c74798-mhwv9" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.766402 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4f45009b-78a2-4b42-882c-bcb88825f343-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-55c8c74798-mhwv9\" (UID: \"4f45009b-78a2-4b42-882c-bcb88825f343\") " pod="openshift-authentication/oauth-openshift-55c8c74798-mhwv9" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.766694 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/4f45009b-78a2-4b42-882c-bcb88825f343-v4-0-config-system-cliconfig\") pod \"oauth-openshift-55c8c74798-mhwv9\" (UID: \"4f45009b-78a2-4b42-882c-bcb88825f343\") " pod="openshift-authentication/oauth-openshift-55c8c74798-mhwv9" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.767029 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/4f45009b-78a2-4b42-882c-bcb88825f343-v4-0-config-system-service-ca\") pod \"oauth-openshift-55c8c74798-mhwv9\" (UID: \"4f45009b-78a2-4b42-882c-bcb88825f343\") " pod="openshift-authentication/oauth-openshift-55c8c74798-mhwv9" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.767505 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/4f45009b-78a2-4b42-882c-bcb88825f343-v4-0-config-system-serving-cert\") pod \"oauth-openshift-55c8c74798-mhwv9\" (UID: \"4f45009b-78a2-4b42-882c-bcb88825f343\") " pod="openshift-authentication/oauth-openshift-55c8c74798-mhwv9" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.769166 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/4f45009b-78a2-4b42-882c-bcb88825f343-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-55c8c74798-mhwv9\" (UID: \"4f45009b-78a2-4b42-882c-bcb88825f343\") " pod="openshift-authentication/oauth-openshift-55c8c74798-mhwv9" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.770531 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/4f45009b-78a2-4b42-882c-bcb88825f343-v4-0-config-user-template-login\") pod \"oauth-openshift-55c8c74798-mhwv9\" (UID: \"4f45009b-78a2-4b42-882c-bcb88825f343\") " pod="openshift-authentication/oauth-openshift-55c8c74798-mhwv9" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.770857 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/4f45009b-78a2-4b42-882c-bcb88825f343-v4-0-config-system-session\") pod \"oauth-openshift-55c8c74798-mhwv9\" (UID: \"4f45009b-78a2-4b42-882c-bcb88825f343\") " pod="openshift-authentication/oauth-openshift-55c8c74798-mhwv9" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.771503 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/4f45009b-78a2-4b42-882c-bcb88825f343-v4-0-config-system-router-certs\") pod \"oauth-openshift-55c8c74798-mhwv9\" (UID: 
\"4f45009b-78a2-4b42-882c-bcb88825f343\") " pod="openshift-authentication/oauth-openshift-55c8c74798-mhwv9" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.771512 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/4f45009b-78a2-4b42-882c-bcb88825f343-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-55c8c74798-mhwv9\" (UID: \"4f45009b-78a2-4b42-882c-bcb88825f343\") " pod="openshift-authentication/oauth-openshift-55c8c74798-mhwv9" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.772286 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/4f45009b-78a2-4b42-882c-bcb88825f343-v4-0-config-user-template-error\") pod \"oauth-openshift-55c8c74798-mhwv9\" (UID: \"4f45009b-78a2-4b42-882c-bcb88825f343\") " pod="openshift-authentication/oauth-openshift-55c8c74798-mhwv9" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.773561 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/4f45009b-78a2-4b42-882c-bcb88825f343-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-55c8c74798-mhwv9\" (UID: \"4f45009b-78a2-4b42-882c-bcb88825f343\") " pod="openshift-authentication/oauth-openshift-55c8c74798-mhwv9" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.784527 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gp8r8\" (UniqueName: \"kubernetes.io/projected/4f45009b-78a2-4b42-882c-bcb88825f343-kube-api-access-gp8r8\") pod \"oauth-openshift-55c8c74798-mhwv9\" (UID: \"4f45009b-78a2-4b42-882c-bcb88825f343\") " pod="openshift-authentication/oauth-openshift-55c8c74798-mhwv9" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.800385 4783 generic.go:334] "Generic (PLEG): container finished" podID="01de2d55-d330-461b-b801-0bfe3078baab" containerID="c1ddb6806afb1fc7ddeeead147df1e0c7be6ab93e41c4abdcd90556a635c7b67" exitCode=0 Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.800469 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" event={"ID":"01de2d55-d330-461b-b801-0bfe3078baab","Type":"ContainerDied","Data":"c1ddb6806afb1fc7ddeeead147df1e0c7be6ab93e41c4abdcd90556a635c7b67"} Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.800525 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.800554 4783 scope.go:117] "RemoveContainer" containerID="c1ddb6806afb1fc7ddeeead147df1e0c7be6ab93e41c4abdcd90556a635c7b67" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.800536 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-4cnvp" event={"ID":"01de2d55-d330-461b-b801-0bfe3078baab","Type":"ContainerDied","Data":"6c94bf68991a7790ae11eed88af4adf040fb421890d8e26bfc5abe2ce86791f9"} Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.824250 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-55c8c74798-mhwv9" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.825206 4783 scope.go:117] "RemoveContainer" containerID="c1ddb6806afb1fc7ddeeead147df1e0c7be6ab93e41c4abdcd90556a635c7b67" Oct 02 10:58:16 crc kubenswrapper[4783]: E1002 10:58:16.825846 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c1ddb6806afb1fc7ddeeead147df1e0c7be6ab93e41c4abdcd90556a635c7b67\": container with ID starting with c1ddb6806afb1fc7ddeeead147df1e0c7be6ab93e41c4abdcd90556a635c7b67 not found: ID does not exist" containerID="c1ddb6806afb1fc7ddeeead147df1e0c7be6ab93e41c4abdcd90556a635c7b67" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.825982 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c1ddb6806afb1fc7ddeeead147df1e0c7be6ab93e41c4abdcd90556a635c7b67"} err="failed to get container status \"c1ddb6806afb1fc7ddeeead147df1e0c7be6ab93e41c4abdcd90556a635c7b67\": rpc error: code = NotFound desc = could not find container \"c1ddb6806afb1fc7ddeeead147df1e0c7be6ab93e41c4abdcd90556a635c7b67\": container with ID starting with c1ddb6806afb1fc7ddeeead147df1e0c7be6ab93e41c4abdcd90556a635c7b67 not found: ID does not exist" Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.856840 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-4cnvp"] Oct 02 10:58:16 crc kubenswrapper[4783]: I1002 10:58:16.860644 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-4cnvp"] Oct 02 10:58:17 crc kubenswrapper[4783]: I1002 10:58:17.040605 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-55c8c74798-mhwv9"] Oct 02 10:58:17 crc kubenswrapper[4783]: W1002 10:58:17.045543 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4f45009b_78a2_4b42_882c_bcb88825f343.slice/crio-570194eaa670a6f7691c12a19ca2ebda3509b9962deee558a81cae90a66754d3 WatchSource:0}: Error finding container 570194eaa670a6f7691c12a19ca2ebda3509b9962deee558a81cae90a66754d3: Status 404 returned error can't find the container with id 570194eaa670a6f7691c12a19ca2ebda3509b9962deee558a81cae90a66754d3 Oct 02 10:58:17 crc kubenswrapper[4783]: I1002 10:58:17.556290 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01de2d55-d330-461b-b801-0bfe3078baab" path="/var/lib/kubelet/pods/01de2d55-d330-461b-b801-0bfe3078baab/volumes" Oct 02 10:58:17 crc kubenswrapper[4783]: I1002 10:58:17.808208 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-55c8c74798-mhwv9" event={"ID":"4f45009b-78a2-4b42-882c-bcb88825f343","Type":"ContainerStarted","Data":"66e18cdcb3d7dce7578ae19f1fafc4fd04ccb3c54723b7d7271771aeafad82e7"} Oct 02 10:58:17 crc kubenswrapper[4783]: I1002 10:58:17.808312 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-55c8c74798-mhwv9" event={"ID":"4f45009b-78a2-4b42-882c-bcb88825f343","Type":"ContainerStarted","Data":"570194eaa670a6f7691c12a19ca2ebda3509b9962deee558a81cae90a66754d3"} Oct 02 10:58:17 crc kubenswrapper[4783]: I1002 10:58:17.808388 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-55c8c74798-mhwv9" Oct 02 10:58:17 crc 
kubenswrapper[4783]: I1002 10:58:17.846006 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-55c8c74798-mhwv9" podStartSLOduration=26.845972822 podStartE2EDuration="26.845972822s" podCreationTimestamp="2025-10-02 10:57:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:58:17.839818078 +0000 UTC m=+331.156012389" watchObservedRunningTime="2025-10-02 10:58:17.845972822 +0000 UTC m=+331.162167123" Oct 02 10:58:18 crc kubenswrapper[4783]: I1002 10:58:18.086082 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-55c8c74798-mhwv9" Oct 02 10:58:51 crc kubenswrapper[4783]: I1002 10:58:51.514100 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 02 10:58:51 crc kubenswrapper[4783]: I1002 10:58:51.515070 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 02 10:59:21 crc kubenswrapper[4783]: I1002 10:59:21.513967 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 02 10:59:21 crc kubenswrapper[4783]: I1002 10:59:21.514790 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 02 10:59:40 crc kubenswrapper[4783]: I1002 10:59:40.523364 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-6692v"] Oct 02 10:59:40 crc kubenswrapper[4783]: I1002 10:59:40.524605 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-6692v" Oct 02 10:59:40 crc kubenswrapper[4783]: I1002 10:59:40.586565 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-6692v"] Oct 02 10:59:40 crc kubenswrapper[4783]: I1002 10:59:40.664123 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/0b625774-b2a5-4b9c-a66d-ee32c880ca66-ca-trust-extracted\") pod \"image-registry-66df7c8f76-6692v\" (UID: \"0b625774-b2a5-4b9c-a66d-ee32c880ca66\") " pod="openshift-image-registry/image-registry-66df7c8f76-6692v" Oct 02 10:59:40 crc kubenswrapper[4783]: I1002 10:59:40.664197 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-6692v\" (UID: \"0b625774-b2a5-4b9c-a66d-ee32c880ca66\") " pod="openshift-image-registry/image-registry-66df7c8f76-6692v" Oct 02 10:59:40 crc kubenswrapper[4783]: I1002 10:59:40.664252 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0b625774-b2a5-4b9c-a66d-ee32c880ca66-trusted-ca\") pod \"image-registry-66df7c8f76-6692v\" (UID: \"0b625774-b2a5-4b9c-a66d-ee32c880ca66\") " pod="openshift-image-registry/image-registry-66df7c8f76-6692v" Oct 02 10:59:40 crc kubenswrapper[4783]: I1002 10:59:40.664303 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/0b625774-b2a5-4b9c-a66d-ee32c880ca66-installation-pull-secrets\") pod \"image-registry-66df7c8f76-6692v\" (UID: \"0b625774-b2a5-4b9c-a66d-ee32c880ca66\") " pod="openshift-image-registry/image-registry-66df7c8f76-6692v" Oct 02 10:59:40 crc kubenswrapper[4783]: I1002 10:59:40.664375 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0b625774-b2a5-4b9c-a66d-ee32c880ca66-bound-sa-token\") pod \"image-registry-66df7c8f76-6692v\" (UID: \"0b625774-b2a5-4b9c-a66d-ee32c880ca66\") " pod="openshift-image-registry/image-registry-66df7c8f76-6692v" Oct 02 10:59:40 crc kubenswrapper[4783]: I1002 10:59:40.664486 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/0b625774-b2a5-4b9c-a66d-ee32c880ca66-registry-certificates\") pod \"image-registry-66df7c8f76-6692v\" (UID: \"0b625774-b2a5-4b9c-a66d-ee32c880ca66\") " pod="openshift-image-registry/image-registry-66df7c8f76-6692v" Oct 02 10:59:40 crc kubenswrapper[4783]: I1002 10:59:40.664548 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/0b625774-b2a5-4b9c-a66d-ee32c880ca66-registry-tls\") pod \"image-registry-66df7c8f76-6692v\" (UID: \"0b625774-b2a5-4b9c-a66d-ee32c880ca66\") " pod="openshift-image-registry/image-registry-66df7c8f76-6692v" Oct 02 10:59:40 crc kubenswrapper[4783]: I1002 10:59:40.664605 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4sjnw\" (UniqueName: 
\"kubernetes.io/projected/0b625774-b2a5-4b9c-a66d-ee32c880ca66-kube-api-access-4sjnw\") pod \"image-registry-66df7c8f76-6692v\" (UID: \"0b625774-b2a5-4b9c-a66d-ee32c880ca66\") " pod="openshift-image-registry/image-registry-66df7c8f76-6692v" Oct 02 10:59:40 crc kubenswrapper[4783]: I1002 10:59:40.686525 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-6692v\" (UID: \"0b625774-b2a5-4b9c-a66d-ee32c880ca66\") " pod="openshift-image-registry/image-registry-66df7c8f76-6692v" Oct 02 10:59:40 crc kubenswrapper[4783]: I1002 10:59:40.765314 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/0b625774-b2a5-4b9c-a66d-ee32c880ca66-registry-certificates\") pod \"image-registry-66df7c8f76-6692v\" (UID: \"0b625774-b2a5-4b9c-a66d-ee32c880ca66\") " pod="openshift-image-registry/image-registry-66df7c8f76-6692v" Oct 02 10:59:40 crc kubenswrapper[4783]: I1002 10:59:40.765367 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/0b625774-b2a5-4b9c-a66d-ee32c880ca66-registry-tls\") pod \"image-registry-66df7c8f76-6692v\" (UID: \"0b625774-b2a5-4b9c-a66d-ee32c880ca66\") " pod="openshift-image-registry/image-registry-66df7c8f76-6692v" Oct 02 10:59:40 crc kubenswrapper[4783]: I1002 10:59:40.765396 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4sjnw\" (UniqueName: \"kubernetes.io/projected/0b625774-b2a5-4b9c-a66d-ee32c880ca66-kube-api-access-4sjnw\") pod \"image-registry-66df7c8f76-6692v\" (UID: \"0b625774-b2a5-4b9c-a66d-ee32c880ca66\") " pod="openshift-image-registry/image-registry-66df7c8f76-6692v" Oct 02 10:59:40 crc kubenswrapper[4783]: I1002 10:59:40.765440 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/0b625774-b2a5-4b9c-a66d-ee32c880ca66-ca-trust-extracted\") pod \"image-registry-66df7c8f76-6692v\" (UID: \"0b625774-b2a5-4b9c-a66d-ee32c880ca66\") " pod="openshift-image-registry/image-registry-66df7c8f76-6692v" Oct 02 10:59:40 crc kubenswrapper[4783]: I1002 10:59:40.765463 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0b625774-b2a5-4b9c-a66d-ee32c880ca66-trusted-ca\") pod \"image-registry-66df7c8f76-6692v\" (UID: \"0b625774-b2a5-4b9c-a66d-ee32c880ca66\") " pod="openshift-image-registry/image-registry-66df7c8f76-6692v" Oct 02 10:59:40 crc kubenswrapper[4783]: I1002 10:59:40.765483 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/0b625774-b2a5-4b9c-a66d-ee32c880ca66-installation-pull-secrets\") pod \"image-registry-66df7c8f76-6692v\" (UID: \"0b625774-b2a5-4b9c-a66d-ee32c880ca66\") " pod="openshift-image-registry/image-registry-66df7c8f76-6692v" Oct 02 10:59:40 crc kubenswrapper[4783]: I1002 10:59:40.765501 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0b625774-b2a5-4b9c-a66d-ee32c880ca66-bound-sa-token\") pod \"image-registry-66df7c8f76-6692v\" (UID: \"0b625774-b2a5-4b9c-a66d-ee32c880ca66\") " 
pod="openshift-image-registry/image-registry-66df7c8f76-6692v" Oct 02 10:59:40 crc kubenswrapper[4783]: I1002 10:59:40.766205 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/0b625774-b2a5-4b9c-a66d-ee32c880ca66-ca-trust-extracted\") pod \"image-registry-66df7c8f76-6692v\" (UID: \"0b625774-b2a5-4b9c-a66d-ee32c880ca66\") " pod="openshift-image-registry/image-registry-66df7c8f76-6692v" Oct 02 10:59:40 crc kubenswrapper[4783]: I1002 10:59:40.767023 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/0b625774-b2a5-4b9c-a66d-ee32c880ca66-registry-certificates\") pod \"image-registry-66df7c8f76-6692v\" (UID: \"0b625774-b2a5-4b9c-a66d-ee32c880ca66\") " pod="openshift-image-registry/image-registry-66df7c8f76-6692v" Oct 02 10:59:40 crc kubenswrapper[4783]: I1002 10:59:40.767099 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0b625774-b2a5-4b9c-a66d-ee32c880ca66-trusted-ca\") pod \"image-registry-66df7c8f76-6692v\" (UID: \"0b625774-b2a5-4b9c-a66d-ee32c880ca66\") " pod="openshift-image-registry/image-registry-66df7c8f76-6692v" Oct 02 10:59:40 crc kubenswrapper[4783]: I1002 10:59:40.771399 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/0b625774-b2a5-4b9c-a66d-ee32c880ca66-installation-pull-secrets\") pod \"image-registry-66df7c8f76-6692v\" (UID: \"0b625774-b2a5-4b9c-a66d-ee32c880ca66\") " pod="openshift-image-registry/image-registry-66df7c8f76-6692v" Oct 02 10:59:40 crc kubenswrapper[4783]: I1002 10:59:40.772578 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/0b625774-b2a5-4b9c-a66d-ee32c880ca66-registry-tls\") pod \"image-registry-66df7c8f76-6692v\" (UID: \"0b625774-b2a5-4b9c-a66d-ee32c880ca66\") " pod="openshift-image-registry/image-registry-66df7c8f76-6692v" Oct 02 10:59:40 crc kubenswrapper[4783]: I1002 10:59:40.786651 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0b625774-b2a5-4b9c-a66d-ee32c880ca66-bound-sa-token\") pod \"image-registry-66df7c8f76-6692v\" (UID: \"0b625774-b2a5-4b9c-a66d-ee32c880ca66\") " pod="openshift-image-registry/image-registry-66df7c8f76-6692v" Oct 02 10:59:40 crc kubenswrapper[4783]: I1002 10:59:40.793626 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4sjnw\" (UniqueName: \"kubernetes.io/projected/0b625774-b2a5-4b9c-a66d-ee32c880ca66-kube-api-access-4sjnw\") pod \"image-registry-66df7c8f76-6692v\" (UID: \"0b625774-b2a5-4b9c-a66d-ee32c880ca66\") " pod="openshift-image-registry/image-registry-66df7c8f76-6692v" Oct 02 10:59:40 crc kubenswrapper[4783]: I1002 10:59:40.850171 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-6692v" Oct 02 10:59:41 crc kubenswrapper[4783]: I1002 10:59:41.068671 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-6692v"] Oct 02 10:59:41 crc kubenswrapper[4783]: I1002 10:59:41.338644 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-6692v" event={"ID":"0b625774-b2a5-4b9c-a66d-ee32c880ca66","Type":"ContainerStarted","Data":"33dae71ae9da044147864f38338064a436ba2eb19f98f2fdbe553f24bc8e0ce6"} Oct 02 10:59:41 crc kubenswrapper[4783]: I1002 10:59:41.338686 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-6692v" event={"ID":"0b625774-b2a5-4b9c-a66d-ee32c880ca66","Type":"ContainerStarted","Data":"4a338c3ec4743c00b242c8e4aa1c694c521fff6fbb6da6bb6ddcd7fbf37d7bd2"} Oct 02 10:59:41 crc kubenswrapper[4783]: I1002 10:59:41.338789 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-6692v" Oct 02 10:59:41 crc kubenswrapper[4783]: I1002 10:59:41.359665 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-6692v" podStartSLOduration=1.359644611 podStartE2EDuration="1.359644611s" podCreationTimestamp="2025-10-02 10:59:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 10:59:41.357939113 +0000 UTC m=+414.674133384" watchObservedRunningTime="2025-10-02 10:59:41.359644611 +0000 UTC m=+414.675838892" Oct 02 10:59:51 crc kubenswrapper[4783]: I1002 10:59:51.514021 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 02 10:59:51 crc kubenswrapper[4783]: I1002 10:59:51.514610 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 02 10:59:51 crc kubenswrapper[4783]: I1002 10:59:51.514659 4783 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" Oct 02 10:59:51 crc kubenswrapper[4783]: I1002 10:59:51.515112 4783 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"71a46c0d678687b26ff4e9327de076d3d20fdca296f5eb412b9a94a9f573bc25"} pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 02 10:59:51 crc kubenswrapper[4783]: I1002 10:59:51.515169 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" containerID="cri-o://71a46c0d678687b26ff4e9327de076d3d20fdca296f5eb412b9a94a9f573bc25" gracePeriod=600 Oct 02 10:59:52 crc kubenswrapper[4783]: I1002 
10:59:52.408236 4783 generic.go:334] "Generic (PLEG): container finished" podID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerID="71a46c0d678687b26ff4e9327de076d3d20fdca296f5eb412b9a94a9f573bc25" exitCode=0 Oct 02 10:59:52 crc kubenswrapper[4783]: I1002 10:59:52.408342 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" event={"ID":"3288cc82-59a8-408e-8b0e-b5255882b4fb","Type":"ContainerDied","Data":"71a46c0d678687b26ff4e9327de076d3d20fdca296f5eb412b9a94a9f573bc25"} Oct 02 10:59:52 crc kubenswrapper[4783]: I1002 10:59:52.408631 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" event={"ID":"3288cc82-59a8-408e-8b0e-b5255882b4fb","Type":"ContainerStarted","Data":"c7b837b3146fd661f532d6de35193ba6facf1c1681e01ea3c274bb1093a5666b"} Oct 02 10:59:52 crc kubenswrapper[4783]: I1002 10:59:52.408720 4783 scope.go:117] "RemoveContainer" containerID="65d5af548f6048ec95699a1b7175ec8efea96441c335805d704b414eedac00d5" Oct 02 11:00:00 crc kubenswrapper[4783]: I1002 11:00:00.145404 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29323380-ftb47"] Oct 02 11:00:00 crc kubenswrapper[4783]: I1002 11:00:00.146867 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29323380-ftb47" Oct 02 11:00:00 crc kubenswrapper[4783]: I1002 11:00:00.151217 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Oct 02 11:00:00 crc kubenswrapper[4783]: I1002 11:00:00.152545 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Oct 02 11:00:00 crc kubenswrapper[4783]: I1002 11:00:00.164256 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29323380-ftb47"] Oct 02 11:00:00 crc kubenswrapper[4783]: I1002 11:00:00.228552 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1c1eb0a7-8e6f-4c80-a838-9058696e2591-secret-volume\") pod \"collect-profiles-29323380-ftb47\" (UID: \"1c1eb0a7-8e6f-4c80-a838-9058696e2591\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323380-ftb47" Oct 02 11:00:00 crc kubenswrapper[4783]: I1002 11:00:00.228639 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4frjw\" (UniqueName: \"kubernetes.io/projected/1c1eb0a7-8e6f-4c80-a838-9058696e2591-kube-api-access-4frjw\") pod \"collect-profiles-29323380-ftb47\" (UID: \"1c1eb0a7-8e6f-4c80-a838-9058696e2591\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323380-ftb47" Oct 02 11:00:00 crc kubenswrapper[4783]: I1002 11:00:00.228846 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1c1eb0a7-8e6f-4c80-a838-9058696e2591-config-volume\") pod \"collect-profiles-29323380-ftb47\" (UID: \"1c1eb0a7-8e6f-4c80-a838-9058696e2591\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323380-ftb47" Oct 02 11:00:00 crc kubenswrapper[4783]: I1002 11:00:00.329816 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" 
(UniqueName: \"kubernetes.io/configmap/1c1eb0a7-8e6f-4c80-a838-9058696e2591-config-volume\") pod \"collect-profiles-29323380-ftb47\" (UID: \"1c1eb0a7-8e6f-4c80-a838-9058696e2591\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323380-ftb47" Oct 02 11:00:00 crc kubenswrapper[4783]: I1002 11:00:00.329882 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1c1eb0a7-8e6f-4c80-a838-9058696e2591-secret-volume\") pod \"collect-profiles-29323380-ftb47\" (UID: \"1c1eb0a7-8e6f-4c80-a838-9058696e2591\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323380-ftb47" Oct 02 11:00:00 crc kubenswrapper[4783]: I1002 11:00:00.329906 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4frjw\" (UniqueName: \"kubernetes.io/projected/1c1eb0a7-8e6f-4c80-a838-9058696e2591-kube-api-access-4frjw\") pod \"collect-profiles-29323380-ftb47\" (UID: \"1c1eb0a7-8e6f-4c80-a838-9058696e2591\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323380-ftb47" Oct 02 11:00:00 crc kubenswrapper[4783]: I1002 11:00:00.331339 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1c1eb0a7-8e6f-4c80-a838-9058696e2591-config-volume\") pod \"collect-profiles-29323380-ftb47\" (UID: \"1c1eb0a7-8e6f-4c80-a838-9058696e2591\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323380-ftb47" Oct 02 11:00:00 crc kubenswrapper[4783]: I1002 11:00:00.336714 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1c1eb0a7-8e6f-4c80-a838-9058696e2591-secret-volume\") pod \"collect-profiles-29323380-ftb47\" (UID: \"1c1eb0a7-8e6f-4c80-a838-9058696e2591\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323380-ftb47" Oct 02 11:00:00 crc kubenswrapper[4783]: I1002 11:00:00.359270 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4frjw\" (UniqueName: \"kubernetes.io/projected/1c1eb0a7-8e6f-4c80-a838-9058696e2591-kube-api-access-4frjw\") pod \"collect-profiles-29323380-ftb47\" (UID: \"1c1eb0a7-8e6f-4c80-a838-9058696e2591\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323380-ftb47" Oct 02 11:00:00 crc kubenswrapper[4783]: I1002 11:00:00.479960 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29323380-ftb47" Oct 02 11:00:00 crc kubenswrapper[4783]: I1002 11:00:00.682482 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29323380-ftb47"] Oct 02 11:00:00 crc kubenswrapper[4783]: W1002 11:00:00.691034 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1c1eb0a7_8e6f_4c80_a838_9058696e2591.slice/crio-f0114424468436cc513021e3a3de6cd66497d3ea53332972fb544a6d55dce6b9 WatchSource:0}: Error finding container f0114424468436cc513021e3a3de6cd66497d3ea53332972fb544a6d55dce6b9: Status 404 returned error can't find the container with id f0114424468436cc513021e3a3de6cd66497d3ea53332972fb544a6d55dce6b9 Oct 02 11:00:00 crc kubenswrapper[4783]: I1002 11:00:00.856089 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-6692v" Oct 02 11:00:00 crc kubenswrapper[4783]: I1002 11:00:00.916267 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-shp99"] Oct 02 11:00:01 crc kubenswrapper[4783]: I1002 11:00:01.482343 4783 generic.go:334] "Generic (PLEG): container finished" podID="1c1eb0a7-8e6f-4c80-a838-9058696e2591" containerID="3a56660b14fac27f885845542ee4a326f5664eedb3c47c6bbf9317abe972d6eb" exitCode=0 Oct 02 11:00:01 crc kubenswrapper[4783]: I1002 11:00:01.482601 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29323380-ftb47" event={"ID":"1c1eb0a7-8e6f-4c80-a838-9058696e2591","Type":"ContainerDied","Data":"3a56660b14fac27f885845542ee4a326f5664eedb3c47c6bbf9317abe972d6eb"} Oct 02 11:00:01 crc kubenswrapper[4783]: I1002 11:00:01.482632 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29323380-ftb47" event={"ID":"1c1eb0a7-8e6f-4c80-a838-9058696e2591","Type":"ContainerStarted","Data":"f0114424468436cc513021e3a3de6cd66497d3ea53332972fb544a6d55dce6b9"} Oct 02 11:00:02 crc kubenswrapper[4783]: I1002 11:00:02.686438 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29323380-ftb47" Oct 02 11:00:02 crc kubenswrapper[4783]: I1002 11:00:02.764145 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1c1eb0a7-8e6f-4c80-a838-9058696e2591-config-volume\") pod \"1c1eb0a7-8e6f-4c80-a838-9058696e2591\" (UID: \"1c1eb0a7-8e6f-4c80-a838-9058696e2591\") " Oct 02 11:00:02 crc kubenswrapper[4783]: I1002 11:00:02.764191 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1c1eb0a7-8e6f-4c80-a838-9058696e2591-secret-volume\") pod \"1c1eb0a7-8e6f-4c80-a838-9058696e2591\" (UID: \"1c1eb0a7-8e6f-4c80-a838-9058696e2591\") " Oct 02 11:00:02 crc kubenswrapper[4783]: I1002 11:00:02.764251 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4frjw\" (UniqueName: \"kubernetes.io/projected/1c1eb0a7-8e6f-4c80-a838-9058696e2591-kube-api-access-4frjw\") pod \"1c1eb0a7-8e6f-4c80-a838-9058696e2591\" (UID: \"1c1eb0a7-8e6f-4c80-a838-9058696e2591\") " Oct 02 11:00:02 crc kubenswrapper[4783]: I1002 11:00:02.764801 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1c1eb0a7-8e6f-4c80-a838-9058696e2591-config-volume" (OuterVolumeSpecName: "config-volume") pod "1c1eb0a7-8e6f-4c80-a838-9058696e2591" (UID: "1c1eb0a7-8e6f-4c80-a838-9058696e2591"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:00:02 crc kubenswrapper[4783]: I1002 11:00:02.769136 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1c1eb0a7-8e6f-4c80-a838-9058696e2591-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "1c1eb0a7-8e6f-4c80-a838-9058696e2591" (UID: "1c1eb0a7-8e6f-4c80-a838-9058696e2591"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:00:02 crc kubenswrapper[4783]: I1002 11:00:02.769173 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c1eb0a7-8e6f-4c80-a838-9058696e2591-kube-api-access-4frjw" (OuterVolumeSpecName: "kube-api-access-4frjw") pod "1c1eb0a7-8e6f-4c80-a838-9058696e2591" (UID: "1c1eb0a7-8e6f-4c80-a838-9058696e2591"). InnerVolumeSpecName "kube-api-access-4frjw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:00:02 crc kubenswrapper[4783]: I1002 11:00:02.866072 4783 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/1c1eb0a7-8e6f-4c80-a838-9058696e2591-config-volume\") on node \"crc\" DevicePath \"\"" Oct 02 11:00:02 crc kubenswrapper[4783]: I1002 11:00:02.866124 4783 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/1c1eb0a7-8e6f-4c80-a838-9058696e2591-secret-volume\") on node \"crc\" DevicePath \"\"" Oct 02 11:00:02 crc kubenswrapper[4783]: I1002 11:00:02.866138 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4frjw\" (UniqueName: \"kubernetes.io/projected/1c1eb0a7-8e6f-4c80-a838-9058696e2591-kube-api-access-4frjw\") on node \"crc\" DevicePath \"\"" Oct 02 11:00:03 crc kubenswrapper[4783]: I1002 11:00:03.492640 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29323380-ftb47" event={"ID":"1c1eb0a7-8e6f-4c80-a838-9058696e2591","Type":"ContainerDied","Data":"f0114424468436cc513021e3a3de6cd66497d3ea53332972fb544a6d55dce6b9"} Oct 02 11:00:03 crc kubenswrapper[4783]: I1002 11:00:03.492948 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f0114424468436cc513021e3a3de6cd66497d3ea53332972fb544a6d55dce6b9" Oct 02 11:00:03 crc kubenswrapper[4783]: I1002 11:00:03.492870 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29323380-ftb47" Oct 02 11:00:25 crc kubenswrapper[4783]: I1002 11:00:25.974165 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-shp99" podUID="700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe" containerName="registry" containerID="cri-o://d2c480d4719ee27e7069e85cc4b930bcf12723db22e040f3a6688c13878632d9" gracePeriod=30 Oct 02 11:00:26 crc kubenswrapper[4783]: I1002 11:00:26.346740 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 11:00:26 crc kubenswrapper[4783]: I1002 11:00:26.503544 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe-trusted-ca\") pod \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " Oct 02 11:00:26 crc kubenswrapper[4783]: I1002 11:00:26.503835 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " Oct 02 11:00:26 crc kubenswrapper[4783]: I1002 11:00:26.503921 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe-registry-certificates\") pod \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " Oct 02 11:00:26 crc kubenswrapper[4783]: I1002 11:00:26.503969 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe-registry-tls\") pod \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " Oct 02 11:00:26 crc kubenswrapper[4783]: I1002 11:00:26.504009 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe-ca-trust-extracted\") pod \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " Oct 02 11:00:26 crc kubenswrapper[4783]: I1002 11:00:26.504042 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe-installation-pull-secrets\") pod \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " Oct 02 11:00:26 crc kubenswrapper[4783]: I1002 11:00:26.504069 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dft9f\" (UniqueName: \"kubernetes.io/projected/700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe-kube-api-access-dft9f\") pod \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " Oct 02 11:00:26 crc kubenswrapper[4783]: I1002 11:00:26.504107 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe-bound-sa-token\") pod \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\" (UID: \"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe\") " Oct 02 11:00:26 crc kubenswrapper[4783]: I1002 11:00:26.506227 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:00:26 crc kubenswrapper[4783]: I1002 11:00:26.506270 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:00:26 crc kubenswrapper[4783]: I1002 11:00:26.513448 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe-kube-api-access-dft9f" (OuterVolumeSpecName: "kube-api-access-dft9f") pod "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe"). InnerVolumeSpecName "kube-api-access-dft9f". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:00:26 crc kubenswrapper[4783]: I1002 11:00:26.515908 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:00:26 crc kubenswrapper[4783]: I1002 11:00:26.515960 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:00:26 crc kubenswrapper[4783]: I1002 11:00:26.519774 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:00:26 crc kubenswrapper[4783]: I1002 11:00:26.524340 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Oct 02 11:00:26 crc kubenswrapper[4783]: I1002 11:00:26.528072 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe" (UID: "700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:00:26 crc kubenswrapper[4783]: I1002 11:00:26.605985 4783 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe-registry-certificates\") on node \"crc\" DevicePath \"\"" Oct 02 11:00:26 crc kubenswrapper[4783]: I1002 11:00:26.606044 4783 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe-registry-tls\") on node \"crc\" DevicePath \"\"" Oct 02 11:00:26 crc kubenswrapper[4783]: I1002 11:00:26.606069 4783 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Oct 02 11:00:26 crc kubenswrapper[4783]: I1002 11:00:26.606088 4783 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Oct 02 11:00:26 crc kubenswrapper[4783]: I1002 11:00:26.606107 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dft9f\" (UniqueName: \"kubernetes.io/projected/700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe-kube-api-access-dft9f\") on node \"crc\" DevicePath \"\"" Oct 02 11:00:26 crc kubenswrapper[4783]: I1002 11:00:26.606125 4783 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe-bound-sa-token\") on node \"crc\" DevicePath \"\"" Oct 02 11:00:26 crc kubenswrapper[4783]: I1002 11:00:26.606142 4783 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe-trusted-ca\") on node \"crc\" DevicePath \"\"" Oct 02 11:00:26 crc kubenswrapper[4783]: I1002 11:00:26.633526 4783 generic.go:334] "Generic (PLEG): container finished" podID="700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe" containerID="d2c480d4719ee27e7069e85cc4b930bcf12723db22e040f3a6688c13878632d9" exitCode=0 Oct 02 11:00:26 crc kubenswrapper[4783]: I1002 11:00:26.633565 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-shp99" event={"ID":"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe","Type":"ContainerDied","Data":"d2c480d4719ee27e7069e85cc4b930bcf12723db22e040f3a6688c13878632d9"} Oct 02 11:00:26 crc kubenswrapper[4783]: I1002 11:00:26.633597 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-shp99" event={"ID":"700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe","Type":"ContainerDied","Data":"fffef357456d8d69aabd735886127d301349d352c75dc8bbf29d36f40216d2c0"} Oct 02 11:00:26 crc kubenswrapper[4783]: I1002 11:00:26.633613 4783 scope.go:117] "RemoveContainer" containerID="d2c480d4719ee27e7069e85cc4b930bcf12723db22e040f3a6688c13878632d9" Oct 02 11:00:26 crc kubenswrapper[4783]: I1002 11:00:26.633873 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-shp99" Oct 02 11:00:26 crc kubenswrapper[4783]: I1002 11:00:26.655774 4783 scope.go:117] "RemoveContainer" containerID="d2c480d4719ee27e7069e85cc4b930bcf12723db22e040f3a6688c13878632d9" Oct 02 11:00:26 crc kubenswrapper[4783]: E1002 11:00:26.657255 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d2c480d4719ee27e7069e85cc4b930bcf12723db22e040f3a6688c13878632d9\": container with ID starting with d2c480d4719ee27e7069e85cc4b930bcf12723db22e040f3a6688c13878632d9 not found: ID does not exist" containerID="d2c480d4719ee27e7069e85cc4b930bcf12723db22e040f3a6688c13878632d9" Oct 02 11:00:26 crc kubenswrapper[4783]: I1002 11:00:26.657479 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d2c480d4719ee27e7069e85cc4b930bcf12723db22e040f3a6688c13878632d9"} err="failed to get container status \"d2c480d4719ee27e7069e85cc4b930bcf12723db22e040f3a6688c13878632d9\": rpc error: code = NotFound desc = could not find container \"d2c480d4719ee27e7069e85cc4b930bcf12723db22e040f3a6688c13878632d9\": container with ID starting with d2c480d4719ee27e7069e85cc4b930bcf12723db22e040f3a6688c13878632d9 not found: ID does not exist" Oct 02 11:00:26 crc kubenswrapper[4783]: I1002 11:00:26.658123 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-shp99"] Oct 02 11:00:26 crc kubenswrapper[4783]: I1002 11:00:26.666661 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-shp99"] Oct 02 11:00:27 crc kubenswrapper[4783]: I1002 11:00:27.557866 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe" path="/var/lib/kubelet/pods/700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe/volumes" Oct 02 11:01:51 crc kubenswrapper[4783]: I1002 11:01:51.513930 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 02 11:01:51 crc kubenswrapper[4783]: I1002 11:01:51.515645 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 02 11:02:21 crc kubenswrapper[4783]: I1002 11:02:21.513556 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 02 11:02:21 crc kubenswrapper[4783]: I1002 11:02:21.515587 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 02 11:02:51 crc kubenswrapper[4783]: I1002 11:02:51.513544 4783 patch_prober.go:28] interesting 
pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 02 11:02:51 crc kubenswrapper[4783]: I1002 11:02:51.514161 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 02 11:02:51 crc kubenswrapper[4783]: I1002 11:02:51.514225 4783 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" Oct 02 11:02:51 crc kubenswrapper[4783]: I1002 11:02:51.515117 4783 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c7b837b3146fd661f532d6de35193ba6facf1c1681e01ea3c274bb1093a5666b"} pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 02 11:02:51 crc kubenswrapper[4783]: I1002 11:02:51.515217 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" containerID="cri-o://c7b837b3146fd661f532d6de35193ba6facf1c1681e01ea3c274bb1093a5666b" gracePeriod=600 Oct 02 11:02:52 crc kubenswrapper[4783]: I1002 11:02:52.549142 4783 generic.go:334] "Generic (PLEG): container finished" podID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerID="c7b837b3146fd661f532d6de35193ba6facf1c1681e01ea3c274bb1093a5666b" exitCode=0 Oct 02 11:02:52 crc kubenswrapper[4783]: I1002 11:02:52.549254 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" event={"ID":"3288cc82-59a8-408e-8b0e-b5255882b4fb","Type":"ContainerDied","Data":"c7b837b3146fd661f532d6de35193ba6facf1c1681e01ea3c274bb1093a5666b"} Oct 02 11:02:52 crc kubenswrapper[4783]: I1002 11:02:52.549497 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" event={"ID":"3288cc82-59a8-408e-8b0e-b5255882b4fb","Type":"ContainerStarted","Data":"3d77624023f180014e3aae2d3836e37ae2fefe61ff0ccee775ec35e399cb240c"} Oct 02 11:02:52 crc kubenswrapper[4783]: I1002 11:02:52.549519 4783 scope.go:117] "RemoveContainer" containerID="71a46c0d678687b26ff4e9327de076d3d20fdca296f5eb412b9a94a9f573bc25" Oct 02 11:04:51 crc kubenswrapper[4783]: I1002 11:04:51.513804 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 02 11:04:51 crc kubenswrapper[4783]: I1002 11:04:51.514409 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 02 11:05:19 
crc kubenswrapper[4783]: I1002 11:05:19.491101 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-r7v8v"] Oct 02 11:05:19 crc kubenswrapper[4783]: I1002 11:05:19.491707 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-r7v8v" podUID="baa575d2-ecb0-4e64-b0fe-361fec92e555" containerName="controller-manager" containerID="cri-o://e2af80275e763991e048e73ec3874b15e64c671e9d99b09f014a9159f4857b18" gracePeriod=30 Oct 02 11:05:19 crc kubenswrapper[4783]: I1002 11:05:19.583898 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-zbtp8"] Oct 02 11:05:19 crc kubenswrapper[4783]: I1002 11:05:19.584084 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zbtp8" podUID="a2328b20-92ab-42c6-a089-89ec4e1c4ffb" containerName="route-controller-manager" containerID="cri-o://51eea399ea15946dc11a4639dd84c3698ad74074a86748ee1d6d3824bbfa0318" gracePeriod=30 Oct 02 11:05:19 crc kubenswrapper[4783]: I1002 11:05:19.830116 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-r7v8v" Oct 02 11:05:19 crc kubenswrapper[4783]: I1002 11:05:19.890934 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zbtp8" Oct 02 11:05:19 crc kubenswrapper[4783]: I1002 11:05:19.948486 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/baa575d2-ecb0-4e64-b0fe-361fec92e555-serving-cert\") pod \"baa575d2-ecb0-4e64-b0fe-361fec92e555\" (UID: \"baa575d2-ecb0-4e64-b0fe-361fec92e555\") " Oct 02 11:05:19 crc kubenswrapper[4783]: I1002 11:05:19.948553 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/baa575d2-ecb0-4e64-b0fe-361fec92e555-proxy-ca-bundles\") pod \"baa575d2-ecb0-4e64-b0fe-361fec92e555\" (UID: \"baa575d2-ecb0-4e64-b0fe-361fec92e555\") " Oct 02 11:05:19 crc kubenswrapper[4783]: I1002 11:05:19.948655 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/baa575d2-ecb0-4e64-b0fe-361fec92e555-client-ca\") pod \"baa575d2-ecb0-4e64-b0fe-361fec92e555\" (UID: \"baa575d2-ecb0-4e64-b0fe-361fec92e555\") " Oct 02 11:05:19 crc kubenswrapper[4783]: I1002 11:05:19.948716 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/baa575d2-ecb0-4e64-b0fe-361fec92e555-config\") pod \"baa575d2-ecb0-4e64-b0fe-361fec92e555\" (UID: \"baa575d2-ecb0-4e64-b0fe-361fec92e555\") " Oct 02 11:05:19 crc kubenswrapper[4783]: I1002 11:05:19.948796 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vs58m\" (UniqueName: \"kubernetes.io/projected/baa575d2-ecb0-4e64-b0fe-361fec92e555-kube-api-access-vs58m\") pod \"baa575d2-ecb0-4e64-b0fe-361fec92e555\" (UID: \"baa575d2-ecb0-4e64-b0fe-361fec92e555\") " Oct 02 11:05:19 crc kubenswrapper[4783]: I1002 11:05:19.949615 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/configmap/baa575d2-ecb0-4e64-b0fe-361fec92e555-client-ca" (OuterVolumeSpecName: "client-ca") pod "baa575d2-ecb0-4e64-b0fe-361fec92e555" (UID: "baa575d2-ecb0-4e64-b0fe-361fec92e555"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:05:19 crc kubenswrapper[4783]: I1002 11:05:19.949714 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/baa575d2-ecb0-4e64-b0fe-361fec92e555-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "baa575d2-ecb0-4e64-b0fe-361fec92e555" (UID: "baa575d2-ecb0-4e64-b0fe-361fec92e555"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:05:19 crc kubenswrapper[4783]: I1002 11:05:19.950207 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/baa575d2-ecb0-4e64-b0fe-361fec92e555-config" (OuterVolumeSpecName: "config") pod "baa575d2-ecb0-4e64-b0fe-361fec92e555" (UID: "baa575d2-ecb0-4e64-b0fe-361fec92e555"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:05:19 crc kubenswrapper[4783]: I1002 11:05:19.954081 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/baa575d2-ecb0-4e64-b0fe-361fec92e555-kube-api-access-vs58m" (OuterVolumeSpecName: "kube-api-access-vs58m") pod "baa575d2-ecb0-4e64-b0fe-361fec92e555" (UID: "baa575d2-ecb0-4e64-b0fe-361fec92e555"). InnerVolumeSpecName "kube-api-access-vs58m". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:05:19 crc kubenswrapper[4783]: I1002 11:05:19.954094 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/baa575d2-ecb0-4e64-b0fe-361fec92e555-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "baa575d2-ecb0-4e64-b0fe-361fec92e555" (UID: "baa575d2-ecb0-4e64-b0fe-361fec92e555"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:05:20 crc kubenswrapper[4783]: I1002 11:05:20.050811 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f2pbd\" (UniqueName: \"kubernetes.io/projected/a2328b20-92ab-42c6-a089-89ec4e1c4ffb-kube-api-access-f2pbd\") pod \"a2328b20-92ab-42c6-a089-89ec4e1c4ffb\" (UID: \"a2328b20-92ab-42c6-a089-89ec4e1c4ffb\") " Oct 02 11:05:20 crc kubenswrapper[4783]: I1002 11:05:20.050852 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a2328b20-92ab-42c6-a089-89ec4e1c4ffb-config\") pod \"a2328b20-92ab-42c6-a089-89ec4e1c4ffb\" (UID: \"a2328b20-92ab-42c6-a089-89ec4e1c4ffb\") " Oct 02 11:05:20 crc kubenswrapper[4783]: I1002 11:05:20.050883 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a2328b20-92ab-42c6-a089-89ec4e1c4ffb-serving-cert\") pod \"a2328b20-92ab-42c6-a089-89ec4e1c4ffb\" (UID: \"a2328b20-92ab-42c6-a089-89ec4e1c4ffb\") " Oct 02 11:05:20 crc kubenswrapper[4783]: I1002 11:05:20.050903 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a2328b20-92ab-42c6-a089-89ec4e1c4ffb-client-ca\") pod \"a2328b20-92ab-42c6-a089-89ec4e1c4ffb\" (UID: \"a2328b20-92ab-42c6-a089-89ec4e1c4ffb\") " Oct 02 11:05:20 crc kubenswrapper[4783]: I1002 11:05:20.051589 4783 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/baa575d2-ecb0-4e64-b0fe-361fec92e555-config\") on node \"crc\" DevicePath \"\"" Oct 02 11:05:20 crc kubenswrapper[4783]: I1002 11:05:20.051609 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vs58m\" (UniqueName: \"kubernetes.io/projected/baa575d2-ecb0-4e64-b0fe-361fec92e555-kube-api-access-vs58m\") on node \"crc\" DevicePath \"\"" Oct 02 11:05:20 crc kubenswrapper[4783]: I1002 11:05:20.051616 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a2328b20-92ab-42c6-a089-89ec4e1c4ffb-config" (OuterVolumeSpecName: "config") pod "a2328b20-92ab-42c6-a089-89ec4e1c4ffb" (UID: "a2328b20-92ab-42c6-a089-89ec4e1c4ffb"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:05:20 crc kubenswrapper[4783]: I1002 11:05:20.051632 4783 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/baa575d2-ecb0-4e64-b0fe-361fec92e555-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 02 11:05:20 crc kubenswrapper[4783]: I1002 11:05:20.051686 4783 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/baa575d2-ecb0-4e64-b0fe-361fec92e555-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Oct 02 11:05:20 crc kubenswrapper[4783]: I1002 11:05:20.051700 4783 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/baa575d2-ecb0-4e64-b0fe-361fec92e555-client-ca\") on node \"crc\" DevicePath \"\"" Oct 02 11:05:20 crc kubenswrapper[4783]: I1002 11:05:20.051778 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a2328b20-92ab-42c6-a089-89ec4e1c4ffb-client-ca" (OuterVolumeSpecName: "client-ca") pod "a2328b20-92ab-42c6-a089-89ec4e1c4ffb" (UID: "a2328b20-92ab-42c6-a089-89ec4e1c4ffb"). 
InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:05:20 crc kubenswrapper[4783]: I1002 11:05:20.053713 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a2328b20-92ab-42c6-a089-89ec4e1c4ffb-kube-api-access-f2pbd" (OuterVolumeSpecName: "kube-api-access-f2pbd") pod "a2328b20-92ab-42c6-a089-89ec4e1c4ffb" (UID: "a2328b20-92ab-42c6-a089-89ec4e1c4ffb"). InnerVolumeSpecName "kube-api-access-f2pbd". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:05:20 crc kubenswrapper[4783]: I1002 11:05:20.054308 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a2328b20-92ab-42c6-a089-89ec4e1c4ffb-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "a2328b20-92ab-42c6-a089-89ec4e1c4ffb" (UID: "a2328b20-92ab-42c6-a089-89ec4e1c4ffb"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:05:20 crc kubenswrapper[4783]: I1002 11:05:20.152608 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f2pbd\" (UniqueName: \"kubernetes.io/projected/a2328b20-92ab-42c6-a089-89ec4e1c4ffb-kube-api-access-f2pbd\") on node \"crc\" DevicePath \"\"" Oct 02 11:05:20 crc kubenswrapper[4783]: I1002 11:05:20.152643 4783 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a2328b20-92ab-42c6-a089-89ec4e1c4ffb-config\") on node \"crc\" DevicePath \"\"" Oct 02 11:05:20 crc kubenswrapper[4783]: I1002 11:05:20.152655 4783 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a2328b20-92ab-42c6-a089-89ec4e1c4ffb-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 02 11:05:20 crc kubenswrapper[4783]: I1002 11:05:20.152686 4783 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a2328b20-92ab-42c6-a089-89ec4e1c4ffb-client-ca\") on node \"crc\" DevicePath \"\"" Oct 02 11:05:20 crc kubenswrapper[4783]: I1002 11:05:20.454619 4783 generic.go:334] "Generic (PLEG): container finished" podID="a2328b20-92ab-42c6-a089-89ec4e1c4ffb" containerID="51eea399ea15946dc11a4639dd84c3698ad74074a86748ee1d6d3824bbfa0318" exitCode=0 Oct 02 11:05:20 crc kubenswrapper[4783]: I1002 11:05:20.454673 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zbtp8" event={"ID":"a2328b20-92ab-42c6-a089-89ec4e1c4ffb","Type":"ContainerDied","Data":"51eea399ea15946dc11a4639dd84c3698ad74074a86748ee1d6d3824bbfa0318"} Oct 02 11:05:20 crc kubenswrapper[4783]: I1002 11:05:20.454699 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zbtp8" Oct 02 11:05:20 crc kubenswrapper[4783]: I1002 11:05:20.454773 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-zbtp8" event={"ID":"a2328b20-92ab-42c6-a089-89ec4e1c4ffb","Type":"ContainerDied","Data":"ef34a0462c5a3f5ab6c67a31c1075f20d109318bebac83abc00c6e8eade7f48d"} Oct 02 11:05:20 crc kubenswrapper[4783]: I1002 11:05:20.454848 4783 scope.go:117] "RemoveContainer" containerID="51eea399ea15946dc11a4639dd84c3698ad74074a86748ee1d6d3824bbfa0318" Oct 02 11:05:20 crc kubenswrapper[4783]: I1002 11:05:20.456609 4783 generic.go:334] "Generic (PLEG): container finished" podID="baa575d2-ecb0-4e64-b0fe-361fec92e555" containerID="e2af80275e763991e048e73ec3874b15e64c671e9d99b09f014a9159f4857b18" exitCode=0 Oct 02 11:05:20 crc kubenswrapper[4783]: I1002 11:05:20.456685 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-r7v8v" Oct 02 11:05:20 crc kubenswrapper[4783]: I1002 11:05:20.456719 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-r7v8v" event={"ID":"baa575d2-ecb0-4e64-b0fe-361fec92e555","Type":"ContainerDied","Data":"e2af80275e763991e048e73ec3874b15e64c671e9d99b09f014a9159f4857b18"} Oct 02 11:05:20 crc kubenswrapper[4783]: I1002 11:05:20.457012 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-r7v8v" event={"ID":"baa575d2-ecb0-4e64-b0fe-361fec92e555","Type":"ContainerDied","Data":"7cbdeeaf72ab82f39569ed87940b3802dd5752183ec24657a0b6172f54641cee"} Oct 02 11:05:20 crc kubenswrapper[4783]: I1002 11:05:20.478356 4783 scope.go:117] "RemoveContainer" containerID="51eea399ea15946dc11a4639dd84c3698ad74074a86748ee1d6d3824bbfa0318" Oct 02 11:05:20 crc kubenswrapper[4783]: E1002 11:05:20.478770 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"51eea399ea15946dc11a4639dd84c3698ad74074a86748ee1d6d3824bbfa0318\": container with ID starting with 51eea399ea15946dc11a4639dd84c3698ad74074a86748ee1d6d3824bbfa0318 not found: ID does not exist" containerID="51eea399ea15946dc11a4639dd84c3698ad74074a86748ee1d6d3824bbfa0318" Oct 02 11:05:20 crc kubenswrapper[4783]: I1002 11:05:20.478797 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"51eea399ea15946dc11a4639dd84c3698ad74074a86748ee1d6d3824bbfa0318"} err="failed to get container status \"51eea399ea15946dc11a4639dd84c3698ad74074a86748ee1d6d3824bbfa0318\": rpc error: code = NotFound desc = could not find container \"51eea399ea15946dc11a4639dd84c3698ad74074a86748ee1d6d3824bbfa0318\": container with ID starting with 51eea399ea15946dc11a4639dd84c3698ad74074a86748ee1d6d3824bbfa0318 not found: ID does not exist" Oct 02 11:05:20 crc kubenswrapper[4783]: I1002 11:05:20.478817 4783 scope.go:117] "RemoveContainer" containerID="e2af80275e763991e048e73ec3874b15e64c671e9d99b09f014a9159f4857b18" Oct 02 11:05:20 crc kubenswrapper[4783]: I1002 11:05:20.485813 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-r7v8v"] Oct 02 11:05:20 crc kubenswrapper[4783]: I1002 11:05:20.501436 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openshift-controller-manager/controller-manager-879f6c89f-r7v8v"] Oct 02 11:05:20 crc kubenswrapper[4783]: I1002 11:05:20.506471 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-zbtp8"] Oct 02 11:05:20 crc kubenswrapper[4783]: I1002 11:05:20.510966 4783 scope.go:117] "RemoveContainer" containerID="e2af80275e763991e048e73ec3874b15e64c671e9d99b09f014a9159f4857b18" Oct 02 11:05:20 crc kubenswrapper[4783]: I1002 11:05:20.511173 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-zbtp8"] Oct 02 11:05:20 crc kubenswrapper[4783]: E1002 11:05:20.511366 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e2af80275e763991e048e73ec3874b15e64c671e9d99b09f014a9159f4857b18\": container with ID starting with e2af80275e763991e048e73ec3874b15e64c671e9d99b09f014a9159f4857b18 not found: ID does not exist" containerID="e2af80275e763991e048e73ec3874b15e64c671e9d99b09f014a9159f4857b18" Oct 02 11:05:20 crc kubenswrapper[4783]: I1002 11:05:20.511405 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e2af80275e763991e048e73ec3874b15e64c671e9d99b09f014a9159f4857b18"} err="failed to get container status \"e2af80275e763991e048e73ec3874b15e64c671e9d99b09f014a9159f4857b18\": rpc error: code = NotFound desc = could not find container \"e2af80275e763991e048e73ec3874b15e64c671e9d99b09f014a9159f4857b18\": container with ID starting with e2af80275e763991e048e73ec3874b15e64c671e9d99b09f014a9159f4857b18 not found: ID does not exist" Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.304629 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-7db7bf8cff-snzxv"] Oct 02 11:05:21 crc kubenswrapper[4783]: E1002 11:05:21.305524 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe" containerName="registry" Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.305566 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe" containerName="registry" Oct 02 11:05:21 crc kubenswrapper[4783]: E1002 11:05:21.305610 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2328b20-92ab-42c6-a089-89ec4e1c4ffb" containerName="route-controller-manager" Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.305630 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2328b20-92ab-42c6-a089-89ec4e1c4ffb" containerName="route-controller-manager" Oct 02 11:05:21 crc kubenswrapper[4783]: E1002 11:05:21.305663 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="baa575d2-ecb0-4e64-b0fe-361fec92e555" containerName="controller-manager" Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.305687 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="baa575d2-ecb0-4e64-b0fe-361fec92e555" containerName="controller-manager" Oct 02 11:05:21 crc kubenswrapper[4783]: E1002 11:05:21.305714 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c1eb0a7-8e6f-4c80-a838-9058696e2591" containerName="collect-profiles" Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.305733 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c1eb0a7-8e6f-4c80-a838-9058696e2591" containerName="collect-profiles" Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 
Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.305996 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="baa575d2-ecb0-4e64-b0fe-361fec92e555" containerName="controller-manager"
Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.306027 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="700dbfb2-8e0f-4ac6-84a0-3a4b918a6bfe" containerName="registry"
Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.306057 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2328b20-92ab-42c6-a089-89ec4e1c4ffb" containerName="route-controller-manager"
Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.306867 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7db7bf8cff-snzxv"
Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.309223 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.309612 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-688bd74c48-7v6qj"]
Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.310549 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-688bd74c48-7v6qj"
Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.311377 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.311702 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.311957 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.312179 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.315370 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.317001 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.317371 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.317583 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.317736 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.317754 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.322074 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2"
Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.328004 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.338382 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-7db7bf8cff-snzxv"]
Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.369862 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-688bd74c48-7v6qj"]
Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.471003 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/df229baf-94b2-4d0c-a109-e679b8522f7b-config\") pod \"route-controller-manager-688bd74c48-7v6qj\" (UID: \"df229baf-94b2-4d0c-a109-e679b8522f7b\") " pod="openshift-route-controller-manager/route-controller-manager-688bd74c48-7v6qj"
Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.471250 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/bcc5c03a-0eb7-461f-917c-ee3137ca62f3-proxy-ca-bundles\") pod \"controller-manager-7db7bf8cff-snzxv\" (UID: \"bcc5c03a-0eb7-461f-917c-ee3137ca62f3\") " pod="openshift-controller-manager/controller-manager-7db7bf8cff-snzxv"
Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.471343 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bcc5c03a-0eb7-461f-917c-ee3137ca62f3-serving-cert\") pod \"controller-manager-7db7bf8cff-snzxv\" (UID: \"bcc5c03a-0eb7-461f-917c-ee3137ca62f3\") " pod="openshift-controller-manager/controller-manager-7db7bf8cff-snzxv"
Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.471441 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9p6bg\" (UniqueName: \"kubernetes.io/projected/df229baf-94b2-4d0c-a109-e679b8522f7b-kube-api-access-9p6bg\") pod \"route-controller-manager-688bd74c48-7v6qj\" (UID: \"df229baf-94b2-4d0c-a109-e679b8522f7b\") " pod="openshift-route-controller-manager/route-controller-manager-688bd74c48-7v6qj"
Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.471537 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rs7k5\" (UniqueName: \"kubernetes.io/projected/bcc5c03a-0eb7-461f-917c-ee3137ca62f3-kube-api-access-rs7k5\") pod \"controller-manager-7db7bf8cff-snzxv\" (UID: \"bcc5c03a-0eb7-461f-917c-ee3137ca62f3\") " pod="openshift-controller-manager/controller-manager-7db7bf8cff-snzxv"
Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.471628 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/df229baf-94b2-4d0c-a109-e679b8522f7b-client-ca\") pod \"route-controller-manager-688bd74c48-7v6qj\" (UID: \"df229baf-94b2-4d0c-a109-e679b8522f7b\") " pod="openshift-route-controller-manager/route-controller-manager-688bd74c48-7v6qj"
Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.471707 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bcc5c03a-0eb7-461f-917c-ee3137ca62f3-config\") pod \"controller-manager-7db7bf8cff-snzxv\" (UID: \"bcc5c03a-0eb7-461f-917c-ee3137ca62f3\") " pod="openshift-controller-manager/controller-manager-7db7bf8cff-snzxv"
\"config\" (UniqueName: \"kubernetes.io/configmap/bcc5c03a-0eb7-461f-917c-ee3137ca62f3-config\") pod \"controller-manager-7db7bf8cff-snzxv\" (UID: \"bcc5c03a-0eb7-461f-917c-ee3137ca62f3\") " pod="openshift-controller-manager/controller-manager-7db7bf8cff-snzxv" Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.471782 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/bcc5c03a-0eb7-461f-917c-ee3137ca62f3-client-ca\") pod \"controller-manager-7db7bf8cff-snzxv\" (UID: \"bcc5c03a-0eb7-461f-917c-ee3137ca62f3\") " pod="openshift-controller-manager/controller-manager-7db7bf8cff-snzxv" Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.471860 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/df229baf-94b2-4d0c-a109-e679b8522f7b-serving-cert\") pod \"route-controller-manager-688bd74c48-7v6qj\" (UID: \"df229baf-94b2-4d0c-a109-e679b8522f7b\") " pod="openshift-route-controller-manager/route-controller-manager-688bd74c48-7v6qj" Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.513186 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.513704 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.554132 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a2328b20-92ab-42c6-a089-89ec4e1c4ffb" path="/var/lib/kubelet/pods/a2328b20-92ab-42c6-a089-89ec4e1c4ffb/volumes" Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.555376 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="baa575d2-ecb0-4e64-b0fe-361fec92e555" path="/var/lib/kubelet/pods/baa575d2-ecb0-4e64-b0fe-361fec92e555/volumes" Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.572683 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/df229baf-94b2-4d0c-a109-e679b8522f7b-config\") pod \"route-controller-manager-688bd74c48-7v6qj\" (UID: \"df229baf-94b2-4d0c-a109-e679b8522f7b\") " pod="openshift-route-controller-manager/route-controller-manager-688bd74c48-7v6qj" Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.572743 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/bcc5c03a-0eb7-461f-917c-ee3137ca62f3-proxy-ca-bundles\") pod \"controller-manager-7db7bf8cff-snzxv\" (UID: \"bcc5c03a-0eb7-461f-917c-ee3137ca62f3\") " pod="openshift-controller-manager/controller-manager-7db7bf8cff-snzxv" Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.572792 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bcc5c03a-0eb7-461f-917c-ee3137ca62f3-serving-cert\") pod \"controller-manager-7db7bf8cff-snzxv\" (UID: 
\"bcc5c03a-0eb7-461f-917c-ee3137ca62f3\") " pod="openshift-controller-manager/controller-manager-7db7bf8cff-snzxv" Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.572834 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9p6bg\" (UniqueName: \"kubernetes.io/projected/df229baf-94b2-4d0c-a109-e679b8522f7b-kube-api-access-9p6bg\") pod \"route-controller-manager-688bd74c48-7v6qj\" (UID: \"df229baf-94b2-4d0c-a109-e679b8522f7b\") " pod="openshift-route-controller-manager/route-controller-manager-688bd74c48-7v6qj" Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.572879 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rs7k5\" (UniqueName: \"kubernetes.io/projected/bcc5c03a-0eb7-461f-917c-ee3137ca62f3-kube-api-access-rs7k5\") pod \"controller-manager-7db7bf8cff-snzxv\" (UID: \"bcc5c03a-0eb7-461f-917c-ee3137ca62f3\") " pod="openshift-controller-manager/controller-manager-7db7bf8cff-snzxv" Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.572932 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/df229baf-94b2-4d0c-a109-e679b8522f7b-client-ca\") pod \"route-controller-manager-688bd74c48-7v6qj\" (UID: \"df229baf-94b2-4d0c-a109-e679b8522f7b\") " pod="openshift-route-controller-manager/route-controller-manager-688bd74c48-7v6qj" Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.572967 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bcc5c03a-0eb7-461f-917c-ee3137ca62f3-config\") pod \"controller-manager-7db7bf8cff-snzxv\" (UID: \"bcc5c03a-0eb7-461f-917c-ee3137ca62f3\") " pod="openshift-controller-manager/controller-manager-7db7bf8cff-snzxv" Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.573003 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/bcc5c03a-0eb7-461f-917c-ee3137ca62f3-client-ca\") pod \"controller-manager-7db7bf8cff-snzxv\" (UID: \"bcc5c03a-0eb7-461f-917c-ee3137ca62f3\") " pod="openshift-controller-manager/controller-manager-7db7bf8cff-snzxv" Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.573046 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/df229baf-94b2-4d0c-a109-e679b8522f7b-serving-cert\") pod \"route-controller-manager-688bd74c48-7v6qj\" (UID: \"df229baf-94b2-4d0c-a109-e679b8522f7b\") " pod="openshift-route-controller-manager/route-controller-manager-688bd74c48-7v6qj" Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.573834 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/df229baf-94b2-4d0c-a109-e679b8522f7b-config\") pod \"route-controller-manager-688bd74c48-7v6qj\" (UID: \"df229baf-94b2-4d0c-a109-e679b8522f7b\") " pod="openshift-route-controller-manager/route-controller-manager-688bd74c48-7v6qj" Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.574793 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bcc5c03a-0eb7-461f-917c-ee3137ca62f3-config\") pod \"controller-manager-7db7bf8cff-snzxv\" (UID: \"bcc5c03a-0eb7-461f-917c-ee3137ca62f3\") " pod="openshift-controller-manager/controller-manager-7db7bf8cff-snzxv" Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 
Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.575043 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/bcc5c03a-0eb7-461f-917c-ee3137ca62f3-proxy-ca-bundles\") pod \"controller-manager-7db7bf8cff-snzxv\" (UID: \"bcc5c03a-0eb7-461f-917c-ee3137ca62f3\") " pod="openshift-controller-manager/controller-manager-7db7bf8cff-snzxv"
Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.575823 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/bcc5c03a-0eb7-461f-917c-ee3137ca62f3-client-ca\") pod \"controller-manager-7db7bf8cff-snzxv\" (UID: \"bcc5c03a-0eb7-461f-917c-ee3137ca62f3\") " pod="openshift-controller-manager/controller-manager-7db7bf8cff-snzxv"
Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.577044 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/df229baf-94b2-4d0c-a109-e679b8522f7b-serving-cert\") pod \"route-controller-manager-688bd74c48-7v6qj\" (UID: \"df229baf-94b2-4d0c-a109-e679b8522f7b\") " pod="openshift-route-controller-manager/route-controller-manager-688bd74c48-7v6qj"
Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.579803 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bcc5c03a-0eb7-461f-917c-ee3137ca62f3-serving-cert\") pod \"controller-manager-7db7bf8cff-snzxv\" (UID: \"bcc5c03a-0eb7-461f-917c-ee3137ca62f3\") " pod="openshift-controller-manager/controller-manager-7db7bf8cff-snzxv"
Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.602667 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9p6bg\" (UniqueName: \"kubernetes.io/projected/df229baf-94b2-4d0c-a109-e679b8522f7b-kube-api-access-9p6bg\") pod \"route-controller-manager-688bd74c48-7v6qj\" (UID: \"df229baf-94b2-4d0c-a109-e679b8522f7b\") " pod="openshift-route-controller-manager/route-controller-manager-688bd74c48-7v6qj"
Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.604690 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rs7k5\" (UniqueName: \"kubernetes.io/projected/bcc5c03a-0eb7-461f-917c-ee3137ca62f3-kube-api-access-rs7k5\") pod \"controller-manager-7db7bf8cff-snzxv\" (UID: \"bcc5c03a-0eb7-461f-917c-ee3137ca62f3\") " pod="openshift-controller-manager/controller-manager-7db7bf8cff-snzxv"
Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.631050 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-7db7bf8cff-snzxv"
Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.644083 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-688bd74c48-7v6qj"
Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.838360 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-7db7bf8cff-snzxv"]
Oct 02 11:05:21 crc kubenswrapper[4783]: W1002 11:05:21.850277 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbcc5c03a_0eb7_461f_917c_ee3137ca62f3.slice/crio-ef5147b8c81a51e0530a779e1829a3bba6f2fc62ddde1b159ce81a85d99a1690 WatchSource:0}: Error finding container ef5147b8c81a51e0530a779e1829a3bba6f2fc62ddde1b159ce81a85d99a1690: Status 404 returned error can't find the container with id ef5147b8c81a51e0530a779e1829a3bba6f2fc62ddde1b159ce81a85d99a1690
Oct 02 11:05:21 crc kubenswrapper[4783]: I1002 11:05:21.893139 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-688bd74c48-7v6qj"]
Oct 02 11:05:21 crc kubenswrapper[4783]: W1002 11:05:21.906876 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddf229baf_94b2_4d0c_a109_e679b8522f7b.slice/crio-20364822d1e4a54e46ec0e41e6a2deb66eed3b14a37e257b5856f455580a17d6 WatchSource:0}: Error finding container 20364822d1e4a54e46ec0e41e6a2deb66eed3b14a37e257b5856f455580a17d6: Status 404 returned error can't find the container with id 20364822d1e4a54e46ec0e41e6a2deb66eed3b14a37e257b5856f455580a17d6
Oct 02 11:05:22 crc kubenswrapper[4783]: I1002 11:05:22.467922 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-688bd74c48-7v6qj" event={"ID":"df229baf-94b2-4d0c-a109-e679b8522f7b","Type":"ContainerStarted","Data":"4e34e7721714dbf14caef1b5bdd84a469f894812155f00d048fc73cbf555740d"}
Oct 02 11:05:22 crc kubenswrapper[4783]: I1002 11:05:22.468274 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-688bd74c48-7v6qj" event={"ID":"df229baf-94b2-4d0c-a109-e679b8522f7b","Type":"ContainerStarted","Data":"20364822d1e4a54e46ec0e41e6a2deb66eed3b14a37e257b5856f455580a17d6"}
Oct 02 11:05:22 crc kubenswrapper[4783]: I1002 11:05:22.469911 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-688bd74c48-7v6qj"
Oct 02 11:05:22 crc kubenswrapper[4783]: I1002 11:05:22.471430 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7db7bf8cff-snzxv" event={"ID":"bcc5c03a-0eb7-461f-917c-ee3137ca62f3","Type":"ContainerStarted","Data":"892e7a0f5097475d0b4e17bcf98eed74f71c8874744be6089ba4a56a3aa73556"}
Oct 02 11:05:22 crc kubenswrapper[4783]: I1002 11:05:22.471474 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-7db7bf8cff-snzxv" event={"ID":"bcc5c03a-0eb7-461f-917c-ee3137ca62f3","Type":"ContainerStarted","Data":"ef5147b8c81a51e0530a779e1829a3bba6f2fc62ddde1b159ce81a85d99a1690"}
Oct 02 11:05:22 crc kubenswrapper[4783]: I1002 11:05:22.471721 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-7db7bf8cff-snzxv"
Oct 02 11:05:22 crc kubenswrapper[4783]: I1002 11:05:22.475406 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-7db7bf8cff-snzxv"
pod="openshift-controller-manager/controller-manager-7db7bf8cff-snzxv" Oct 02 11:05:22 crc kubenswrapper[4783]: I1002 11:05:22.488612 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-688bd74c48-7v6qj" podStartSLOduration=3.488596577 podStartE2EDuration="3.488596577s" podCreationTimestamp="2025-10-02 11:05:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:05:22.486050697 +0000 UTC m=+755.802244958" watchObservedRunningTime="2025-10-02 11:05:22.488596577 +0000 UTC m=+755.804790848" Oct 02 11:05:22 crc kubenswrapper[4783]: I1002 11:05:22.504636 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-7db7bf8cff-snzxv" podStartSLOduration=3.504616434 podStartE2EDuration="3.504616434s" podCreationTimestamp="2025-10-02 11:05:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:05:22.502873806 +0000 UTC m=+755.819068077" watchObservedRunningTime="2025-10-02 11:05:22.504616434 +0000 UTC m=+755.820810695" Oct 02 11:05:22 crc kubenswrapper[4783]: I1002 11:05:22.562573 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-688bd74c48-7v6qj" Oct 02 11:05:24 crc kubenswrapper[4783]: I1002 11:05:24.703113 4783 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Oct 02 11:05:41 crc kubenswrapper[4783]: I1002 11:05:41.579766 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-n7shk"] Oct 02 11:05:41 crc kubenswrapper[4783]: I1002 11:05:41.581103 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-n7shk" Oct 02 11:05:41 crc kubenswrapper[4783]: I1002 11:05:41.583808 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-5b446d88c5-gkpsw"] Oct 02 11:05:41 crc kubenswrapper[4783]: I1002 11:05:41.584340 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-gkpsw" Oct 02 11:05:41 crc kubenswrapper[4783]: I1002 11:05:41.584707 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Oct 02 11:05:41 crc kubenswrapper[4783]: I1002 11:05:41.584832 4783 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-zv6xh" Oct 02 11:05:41 crc kubenswrapper[4783]: I1002 11:05:41.585070 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Oct 02 11:05:41 crc kubenswrapper[4783]: I1002 11:05:41.590159 4783 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-n82b6" Oct 02 11:05:41 crc kubenswrapper[4783]: I1002 11:05:41.591787 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-n7shk"] Oct 02 11:05:41 crc kubenswrapper[4783]: I1002 11:05:41.622002 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-stgp6"] Oct 02 11:05:41 crc kubenswrapper[4783]: I1002 11:05:41.622777 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-stgp6" Oct 02 11:05:41 crc kubenswrapper[4783]: I1002 11:05:41.625827 4783 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-fph89" Oct 02 11:05:41 crc kubenswrapper[4783]: I1002 11:05:41.644658 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-gkpsw"] Oct 02 11:05:41 crc kubenswrapper[4783]: I1002 11:05:41.656183 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-stgp6"] Oct 02 11:05:41 crc kubenswrapper[4783]: I1002 11:05:41.742334 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6zj8d\" (UniqueName: \"kubernetes.io/projected/25ca49b1-8994-44b5-b27d-671cd01d74da-kube-api-access-6zj8d\") pod \"cert-manager-webhook-5655c58dd6-stgp6\" (UID: \"25ca49b1-8994-44b5-b27d-671cd01d74da\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-stgp6" Oct 02 11:05:41 crc kubenswrapper[4783]: I1002 11:05:41.742380 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9pjq6\" (UniqueName: \"kubernetes.io/projected/faf53b2d-1a5b-4753-9067-2c0e6451c204-kube-api-access-9pjq6\") pod \"cert-manager-5b446d88c5-gkpsw\" (UID: \"faf53b2d-1a5b-4753-9067-2c0e6451c204\") " pod="cert-manager/cert-manager-5b446d88c5-gkpsw" Oct 02 11:05:41 crc kubenswrapper[4783]: I1002 11:05:41.742443 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dgq95\" (UniqueName: \"kubernetes.io/projected/5f865515-bc01-488a-9f04-6e69b072a30e-kube-api-access-dgq95\") pod \"cert-manager-cainjector-7f985d654d-n7shk\" (UID: \"5f865515-bc01-488a-9f04-6e69b072a30e\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-n7shk" Oct 02 11:05:41 crc kubenswrapper[4783]: I1002 11:05:41.843046 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dgq95\" (UniqueName: \"kubernetes.io/projected/5f865515-bc01-488a-9f04-6e69b072a30e-kube-api-access-dgq95\") pod \"cert-manager-cainjector-7f985d654d-n7shk\" (UID: \"5f865515-bc01-488a-9f04-6e69b072a30e\") " 
pod="cert-manager/cert-manager-cainjector-7f985d654d-n7shk" Oct 02 11:05:41 crc kubenswrapper[4783]: I1002 11:05:41.843131 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6zj8d\" (UniqueName: \"kubernetes.io/projected/25ca49b1-8994-44b5-b27d-671cd01d74da-kube-api-access-6zj8d\") pod \"cert-manager-webhook-5655c58dd6-stgp6\" (UID: \"25ca49b1-8994-44b5-b27d-671cd01d74da\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-stgp6" Oct 02 11:05:41 crc kubenswrapper[4783]: I1002 11:05:41.843160 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9pjq6\" (UniqueName: \"kubernetes.io/projected/faf53b2d-1a5b-4753-9067-2c0e6451c204-kube-api-access-9pjq6\") pod \"cert-manager-5b446d88c5-gkpsw\" (UID: \"faf53b2d-1a5b-4753-9067-2c0e6451c204\") " pod="cert-manager/cert-manager-5b446d88c5-gkpsw" Oct 02 11:05:41 crc kubenswrapper[4783]: I1002 11:05:41.861323 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dgq95\" (UniqueName: \"kubernetes.io/projected/5f865515-bc01-488a-9f04-6e69b072a30e-kube-api-access-dgq95\") pod \"cert-manager-cainjector-7f985d654d-n7shk\" (UID: \"5f865515-bc01-488a-9f04-6e69b072a30e\") " pod="cert-manager/cert-manager-cainjector-7f985d654d-n7shk" Oct 02 11:05:41 crc kubenswrapper[4783]: I1002 11:05:41.862396 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9pjq6\" (UniqueName: \"kubernetes.io/projected/faf53b2d-1a5b-4753-9067-2c0e6451c204-kube-api-access-9pjq6\") pod \"cert-manager-5b446d88c5-gkpsw\" (UID: \"faf53b2d-1a5b-4753-9067-2c0e6451c204\") " pod="cert-manager/cert-manager-5b446d88c5-gkpsw" Oct 02 11:05:41 crc kubenswrapper[4783]: I1002 11:05:41.862518 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6zj8d\" (UniqueName: \"kubernetes.io/projected/25ca49b1-8994-44b5-b27d-671cd01d74da-kube-api-access-6zj8d\") pod \"cert-manager-webhook-5655c58dd6-stgp6\" (UID: \"25ca49b1-8994-44b5-b27d-671cd01d74da\") " pod="cert-manager/cert-manager-webhook-5655c58dd6-stgp6" Oct 02 11:05:41 crc kubenswrapper[4783]: I1002 11:05:41.910450 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-7f985d654d-n7shk" Oct 02 11:05:41 crc kubenswrapper[4783]: I1002 11:05:41.927229 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-5b446d88c5-gkpsw" Oct 02 11:05:41 crc kubenswrapper[4783]: I1002 11:05:41.957159 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-5655c58dd6-stgp6" Oct 02 11:05:42 crc kubenswrapper[4783]: I1002 11:05:42.342797 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-7f985d654d-n7shk"] Oct 02 11:05:42 crc kubenswrapper[4783]: W1002 11:05:42.344655 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5f865515_bc01_488a_9f04_6e69b072a30e.slice/crio-b83b8a920ecd37c1acec2adb5f7ceb03b6d90e4857254c10145785986a3a55be WatchSource:0}: Error finding container b83b8a920ecd37c1acec2adb5f7ceb03b6d90e4857254c10145785986a3a55be: Status 404 returned error can't find the container with id b83b8a920ecd37c1acec2adb5f7ceb03b6d90e4857254c10145785986a3a55be Oct 02 11:05:42 crc kubenswrapper[4783]: I1002 11:05:42.347399 4783 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 02 11:05:42 crc kubenswrapper[4783]: I1002 11:05:42.402654 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-5b446d88c5-gkpsw"] Oct 02 11:05:42 crc kubenswrapper[4783]: W1002 11:05:42.409929 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfaf53b2d_1a5b_4753_9067_2c0e6451c204.slice/crio-ce9cd2d1af4304e227edec1391eaeb43f9ed302051c91197dbbf06a62f18ad58 WatchSource:0}: Error finding container ce9cd2d1af4304e227edec1391eaeb43f9ed302051c91197dbbf06a62f18ad58: Status 404 returned error can't find the container with id ce9cd2d1af4304e227edec1391eaeb43f9ed302051c91197dbbf06a62f18ad58 Oct 02 11:05:42 crc kubenswrapper[4783]: I1002 11:05:42.450237 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-5655c58dd6-stgp6"] Oct 02 11:05:42 crc kubenswrapper[4783]: W1002 11:05:42.457702 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod25ca49b1_8994_44b5_b27d_671cd01d74da.slice/crio-8e3620e512672b17e58b18753d28417d4ba9abaf26f1129abc0ca2e3d088facc WatchSource:0}: Error finding container 8e3620e512672b17e58b18753d28417d4ba9abaf26f1129abc0ca2e3d088facc: Status 404 returned error can't find the container with id 8e3620e512672b17e58b18753d28417d4ba9abaf26f1129abc0ca2e3d088facc Oct 02 11:05:42 crc kubenswrapper[4783]: I1002 11:05:42.579402 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-gkpsw" event={"ID":"faf53b2d-1a5b-4753-9067-2c0e6451c204","Type":"ContainerStarted","Data":"ce9cd2d1af4304e227edec1391eaeb43f9ed302051c91197dbbf06a62f18ad58"} Oct 02 11:05:42 crc kubenswrapper[4783]: I1002 11:05:42.580694 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-stgp6" event={"ID":"25ca49b1-8994-44b5-b27d-671cd01d74da","Type":"ContainerStarted","Data":"8e3620e512672b17e58b18753d28417d4ba9abaf26f1129abc0ca2e3d088facc"} Oct 02 11:05:42 crc kubenswrapper[4783]: I1002 11:05:42.582293 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-n7shk" event={"ID":"5f865515-bc01-488a-9f04-6e69b072a30e","Type":"ContainerStarted","Data":"b83b8a920ecd37c1acec2adb5f7ceb03b6d90e4857254c10145785986a3a55be"} Oct 02 11:05:46 crc kubenswrapper[4783]: I1002 11:05:46.603646 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-5b446d88c5-gkpsw" 
event={"ID":"faf53b2d-1a5b-4753-9067-2c0e6451c204","Type":"ContainerStarted","Data":"4da0c42fae196ab9b5578da6eb2ffb978d77530864818a3c045180193f806960"} Oct 02 11:05:46 crc kubenswrapper[4783]: I1002 11:05:46.605193 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-5655c58dd6-stgp6" event={"ID":"25ca49b1-8994-44b5-b27d-671cd01d74da","Type":"ContainerStarted","Data":"0b2a3157ffdd096e3803f9d0e6c6e8a3edf4cdab2bfb6ddd865dc6031ec5ecbf"} Oct 02 11:05:46 crc kubenswrapper[4783]: I1002 11:05:46.606154 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-7f985d654d-n7shk" event={"ID":"5f865515-bc01-488a-9f04-6e69b072a30e","Type":"ContainerStarted","Data":"d03b28bcf700b73690fc1cc87a15ea8c360203d894d6a7c084855f6acf103eb5"} Oct 02 11:05:46 crc kubenswrapper[4783]: I1002 11:05:46.617120 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-5b446d88c5-gkpsw" podStartSLOduration=2.460775161 podStartE2EDuration="5.617108985s" podCreationTimestamp="2025-10-02 11:05:41 +0000 UTC" firstStartedPulling="2025-10-02 11:05:42.412740287 +0000 UTC m=+775.728934548" lastFinishedPulling="2025-10-02 11:05:45.569074111 +0000 UTC m=+778.885268372" observedRunningTime="2025-10-02 11:05:46.615890892 +0000 UTC m=+779.932085153" watchObservedRunningTime="2025-10-02 11:05:46.617108985 +0000 UTC m=+779.933303246" Oct 02 11:05:46 crc kubenswrapper[4783]: I1002 11:05:46.636239 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-7f985d654d-n7shk" podStartSLOduration=2.420315276 podStartE2EDuration="5.636218156s" podCreationTimestamp="2025-10-02 11:05:41 +0000 UTC" firstStartedPulling="2025-10-02 11:05:42.347185648 +0000 UTC m=+775.663379909" lastFinishedPulling="2025-10-02 11:05:45.563088528 +0000 UTC m=+778.879282789" observedRunningTime="2025-10-02 11:05:46.635656541 +0000 UTC m=+779.951850802" watchObservedRunningTime="2025-10-02 11:05:46.636218156 +0000 UTC m=+779.952412417" Oct 02 11:05:46 crc kubenswrapper[4783]: I1002 11:05:46.959293 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-5655c58dd6-stgp6" Oct 02 11:05:51 crc kubenswrapper[4783]: I1002 11:05:51.513736 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 02 11:05:51 crc kubenswrapper[4783]: I1002 11:05:51.515104 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 02 11:05:51 crc kubenswrapper[4783]: I1002 11:05:51.515203 4783 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" Oct 02 11:05:51 crc kubenswrapper[4783]: I1002 11:05:51.516254 4783 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3d77624023f180014e3aae2d3836e37ae2fefe61ff0ccee775ec35e399cb240c"} 
pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 02 11:05:51 crc kubenswrapper[4783]: I1002 11:05:51.516401 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" containerID="cri-o://3d77624023f180014e3aae2d3836e37ae2fefe61ff0ccee775ec35e399cb240c" gracePeriod=600 Oct 02 11:05:51 crc kubenswrapper[4783]: I1002 11:05:51.962004 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-5655c58dd6-stgp6" Oct 02 11:05:51 crc kubenswrapper[4783]: I1002 11:05:51.975431 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-5655c58dd6-stgp6" podStartSLOduration=7.798261393 podStartE2EDuration="10.975388885s" podCreationTimestamp="2025-10-02 11:05:41 +0000 UTC" firstStartedPulling="2025-10-02 11:05:42.459930115 +0000 UTC m=+775.776124376" lastFinishedPulling="2025-10-02 11:05:45.637057607 +0000 UTC m=+778.953251868" observedRunningTime="2025-10-02 11:05:46.649620262 +0000 UTC m=+779.965814523" watchObservedRunningTime="2025-10-02 11:05:51.975388885 +0000 UTC m=+785.291583146" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.042514 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-qmd84"] Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.043029 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" containerName="ovn-controller" containerID="cri-o://cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e" gracePeriod=30 Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.043089 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" containerName="nbdb" containerID="cri-o://a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3" gracePeriod=30 Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.043367 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" containerName="ovn-acl-logging" containerID="cri-o://abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861" gracePeriod=30 Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.043406 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" containerName="kube-rbac-proxy-node" containerID="cri-o://9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e" gracePeriod=30 Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.043625 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" containerName="sbdb" containerID="cri-o://ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f" gracePeriod=30 Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.043638 4783 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" containerName="northd" containerID="cri-o://38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d" gracePeriod=30 Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.043659 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b" gracePeriod=30 Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.109876 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" containerName="ovnkube-controller" containerID="cri-o://6b3cc167cd7b063c3bc27babf6fb353fa69b70a0594d01dae62b30d182f97ad5" gracePeriod=30 Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.380639 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qmd84_095cdcdf-1ea0-40da-871a-1223c6737377/ovnkube-controller/4.log" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.381530 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qmd84_095cdcdf-1ea0-40da-871a-1223c6737377/ovnkube-controller/3.log" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.383694 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qmd84_095cdcdf-1ea0-40da-871a-1223c6737377/ovn-acl-logging/0.log" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.384173 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qmd84_095cdcdf-1ea0-40da-871a-1223c6737377/ovn-controller/0.log" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.384594 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.433148 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-f6ws2"] Oct 02 11:05:52 crc kubenswrapper[4783]: E1002 11:05:52.433424 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" containerName="nbdb" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.433445 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" containerName="nbdb" Oct 02 11:05:52 crc kubenswrapper[4783]: E1002 11:05:52.433456 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" containerName="ovn-controller" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.433464 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" containerName="ovn-controller" Oct 02 11:05:52 crc kubenswrapper[4783]: E1002 11:05:52.433475 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" containerName="ovnkube-controller" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.433483 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" containerName="ovnkube-controller" Oct 02 11:05:52 crc kubenswrapper[4783]: E1002 11:05:52.433493 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" containerName="ovnkube-controller" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.433500 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" containerName="ovnkube-controller" Oct 02 11:05:52 crc kubenswrapper[4783]: E1002 11:05:52.433510 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" containerName="ovnkube-controller" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.433518 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" containerName="ovnkube-controller" Oct 02 11:05:52 crc kubenswrapper[4783]: E1002 11:05:52.433533 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" containerName="kube-rbac-proxy-node" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.433541 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" containerName="kube-rbac-proxy-node" Oct 02 11:05:52 crc kubenswrapper[4783]: E1002 11:05:52.433550 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" containerName="kube-rbac-proxy-ovn-metrics" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.433558 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" containerName="kube-rbac-proxy-ovn-metrics" Oct 02 11:05:52 crc kubenswrapper[4783]: E1002 11:05:52.433568 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" containerName="kubecfg-setup" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.433576 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" containerName="kubecfg-setup" Oct 02 11:05:52 crc kubenswrapper[4783]: E1002 11:05:52.433584 4783 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" containerName="northd" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.433592 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" containerName="northd" Oct 02 11:05:52 crc kubenswrapper[4783]: E1002 11:05:52.433605 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" containerName="ovn-acl-logging" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.433612 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" containerName="ovn-acl-logging" Oct 02 11:05:52 crc kubenswrapper[4783]: E1002 11:05:52.433623 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" containerName="sbdb" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.433630 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" containerName="sbdb" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.433742 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" containerName="kube-rbac-proxy-node" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.433756 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" containerName="ovnkube-controller" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.433768 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" containerName="kube-rbac-proxy-ovn-metrics" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.433781 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" containerName="nbdb" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.433791 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" containerName="ovn-controller" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.433801 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" containerName="ovnkube-controller" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.433811 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" containerName="ovnkube-controller" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.433818 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" containerName="ovn-acl-logging" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.433827 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" containerName="northd" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.433836 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" containerName="sbdb" Oct 02 11:05:52 crc kubenswrapper[4783]: E1002 11:05:52.433965 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" containerName="ovnkube-controller" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.433976 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" containerName="ovnkube-controller" Oct 02 11:05:52 crc kubenswrapper[4783]: E1002 11:05:52.433985 4783 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" containerName="ovnkube-controller" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.433993 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" containerName="ovnkube-controller" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.434107 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" containerName="ovnkube-controller" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.434344 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" containerName="ovnkube-controller" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.436000 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.475693 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-host-run-ovn-kubernetes\") pod \"095cdcdf-1ea0-40da-871a-1223c6737377\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.475727 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-host-run-netns\") pod \"095cdcdf-1ea0-40da-871a-1223c6737377\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.475749 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/095cdcdf-1ea0-40da-871a-1223c6737377-env-overrides\") pod \"095cdcdf-1ea0-40da-871a-1223c6737377\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.475769 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-host-slash\") pod \"095cdcdf-1ea0-40da-871a-1223c6737377\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.475803 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-host-cni-netd\") pod \"095cdcdf-1ea0-40da-871a-1223c6737377\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.475798 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "095cdcdf-1ea0-40da-871a-1223c6737377" (UID: "095cdcdf-1ea0-40da-871a-1223c6737377"). InnerVolumeSpecName "host-run-ovn-kubernetes". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.475839 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/095cdcdf-1ea0-40da-871a-1223c6737377-ovnkube-script-lib\") pod \"095cdcdf-1ea0-40da-871a-1223c6737377\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.475857 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-run-openvswitch\") pod \"095cdcdf-1ea0-40da-871a-1223c6737377\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.475839 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-host-slash" (OuterVolumeSpecName: "host-slash") pod "095cdcdf-1ea0-40da-871a-1223c6737377" (UID: "095cdcdf-1ea0-40da-871a-1223c6737377"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.475907 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "095cdcdf-1ea0-40da-871a-1223c6737377" (UID: "095cdcdf-1ea0-40da-871a-1223c6737377"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.475884 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-host-var-lib-cni-networks-ovn-kubernetes\") pod \"095cdcdf-1ea0-40da-871a-1223c6737377\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.475858 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "095cdcdf-1ea0-40da-871a-1223c6737377" (UID: "095cdcdf-1ea0-40da-871a-1223c6737377"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.475855 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "095cdcdf-1ea0-40da-871a-1223c6737377" (UID: "095cdcdf-1ea0-40da-871a-1223c6737377"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.475941 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "095cdcdf-1ea0-40da-871a-1223c6737377" (UID: "095cdcdf-1ea0-40da-871a-1223c6737377"). InnerVolumeSpecName "run-openvswitch". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.475976 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-run-systemd\") pod \"095cdcdf-1ea0-40da-871a-1223c6737377\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.476028 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/095cdcdf-1ea0-40da-871a-1223c6737377-ovn-node-metrics-cert\") pod \"095cdcdf-1ea0-40da-871a-1223c6737377\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.476052 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-systemd-units\") pod \"095cdcdf-1ea0-40da-871a-1223c6737377\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.476085 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-host-kubelet\") pod \"095cdcdf-1ea0-40da-871a-1223c6737377\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.476111 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-node-log\") pod \"095cdcdf-1ea0-40da-871a-1223c6737377\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.476134 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-var-lib-openvswitch\") pod \"095cdcdf-1ea0-40da-871a-1223c6737377\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.476153 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-etc-openvswitch\") pod \"095cdcdf-1ea0-40da-871a-1223c6737377\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.476171 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/095cdcdf-1ea0-40da-871a-1223c6737377-ovnkube-config\") pod \"095cdcdf-1ea0-40da-871a-1223c6737377\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.476190 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-host-cni-bin\") pod \"095cdcdf-1ea0-40da-871a-1223c6737377\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.476219 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-log-socket\") pod \"095cdcdf-1ea0-40da-871a-1223c6737377\" (UID: 
\"095cdcdf-1ea0-40da-871a-1223c6737377\") " Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.476241 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-run-ovn\") pod \"095cdcdf-1ea0-40da-871a-1223c6737377\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.476257 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hhvtk\" (UniqueName: \"kubernetes.io/projected/095cdcdf-1ea0-40da-871a-1223c6737377-kube-api-access-hhvtk\") pod \"095cdcdf-1ea0-40da-871a-1223c6737377\" (UID: \"095cdcdf-1ea0-40da-871a-1223c6737377\") " Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.476276 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/095cdcdf-1ea0-40da-871a-1223c6737377-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "095cdcdf-1ea0-40da-871a-1223c6737377" (UID: "095cdcdf-1ea0-40da-871a-1223c6737377"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.476286 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/095cdcdf-1ea0-40da-871a-1223c6737377-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "095cdcdf-1ea0-40da-871a-1223c6737377" (UID: "095cdcdf-1ea0-40da-871a-1223c6737377"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.476319 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "095cdcdf-1ea0-40da-871a-1223c6737377" (UID: "095cdcdf-1ea0-40da-871a-1223c6737377"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.476320 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "095cdcdf-1ea0-40da-871a-1223c6737377" (UID: "095cdcdf-1ea0-40da-871a-1223c6737377"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.476341 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "095cdcdf-1ea0-40da-871a-1223c6737377" (UID: "095cdcdf-1ea0-40da-871a-1223c6737377"). InnerVolumeSpecName "etc-openvswitch". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.476584 4783 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.476596 4783 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.476606 4783 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.476617 4783 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-host-cni-bin\") on node \"crc\" DevicePath \"\"" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.476621 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/095cdcdf-1ea0-40da-871a-1223c6737377-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "095cdcdf-1ea0-40da-871a-1223c6737377" (UID: "095cdcdf-1ea0-40da-871a-1223c6737377"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.476626 4783 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.476660 4783 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-host-run-netns\") on node \"crc\" DevicePath \"\"" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.476672 4783 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/095cdcdf-1ea0-40da-871a-1223c6737377-env-overrides\") on node \"crc\" DevicePath \"\"" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.476684 4783 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-host-slash\") on node \"crc\" DevicePath \"\"" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.476695 4783 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-host-cni-netd\") on node \"crc\" DevicePath \"\"" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.476706 4783 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/095cdcdf-1ea0-40da-871a-1223c6737377-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.476718 4783 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-run-openvswitch\") on node \"crc\" DevicePath \"\"" Oct 02 11:05:52 crc 
kubenswrapper[4783]: I1002 11:05:52.476718 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "095cdcdf-1ea0-40da-871a-1223c6737377" (UID: "095cdcdf-1ea0-40da-871a-1223c6737377"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.476745 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "095cdcdf-1ea0-40da-871a-1223c6737377" (UID: "095cdcdf-1ea0-40da-871a-1223c6737377"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.476746 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-node-log" (OuterVolumeSpecName: "node-log") pod "095cdcdf-1ea0-40da-871a-1223c6737377" (UID: "095cdcdf-1ea0-40da-871a-1223c6737377"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.476742 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-log-socket" (OuterVolumeSpecName: "log-socket") pod "095cdcdf-1ea0-40da-871a-1223c6737377" (UID: "095cdcdf-1ea0-40da-871a-1223c6737377"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.476773 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "095cdcdf-1ea0-40da-871a-1223c6737377" (UID: "095cdcdf-1ea0-40da-871a-1223c6737377"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.481002 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/095cdcdf-1ea0-40da-871a-1223c6737377-kube-api-access-hhvtk" (OuterVolumeSpecName: "kube-api-access-hhvtk") pod "095cdcdf-1ea0-40da-871a-1223c6737377" (UID: "095cdcdf-1ea0-40da-871a-1223c6737377"). InnerVolumeSpecName "kube-api-access-hhvtk". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.481200 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/095cdcdf-1ea0-40da-871a-1223c6737377-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "095cdcdf-1ea0-40da-871a-1223c6737377" (UID: "095cdcdf-1ea0-40da-871a-1223c6737377"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.487852 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "095cdcdf-1ea0-40da-871a-1223c6737377" (UID: "095cdcdf-1ea0-40da-871a-1223c6737377"). InnerVolumeSpecName "run-systemd". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.577591 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/ded11e77-4b8a-4208-a427-7656b31b9ba5-ovnkube-script-lib\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.577631 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8zhbj\" (UniqueName: \"kubernetes.io/projected/ded11e77-4b8a-4208-a427-7656b31b9ba5-kube-api-access-8zhbj\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.577773 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/ded11e77-4b8a-4208-a427-7656b31b9ba5-run-ovn\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.577851 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ded11e77-4b8a-4208-a427-7656b31b9ba5-var-lib-openvswitch\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.577930 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ded11e77-4b8a-4208-a427-7656b31b9ba5-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.578002 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/ded11e77-4b8a-4208-a427-7656b31b9ba5-host-cni-netd\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.578026 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/ded11e77-4b8a-4208-a427-7656b31b9ba5-host-run-netns\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.578061 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/ded11e77-4b8a-4208-a427-7656b31b9ba5-ovnkube-config\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.578106 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: 
\"kubernetes.io/host-path/ded11e77-4b8a-4208-a427-7656b31b9ba5-node-log\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.578134 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/ded11e77-4b8a-4208-a427-7656b31b9ba5-host-kubelet\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.578167 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ded11e77-4b8a-4208-a427-7656b31b9ba5-run-openvswitch\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.578194 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/ded11e77-4b8a-4208-a427-7656b31b9ba5-host-slash\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.578227 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/ded11e77-4b8a-4208-a427-7656b31b9ba5-log-socket\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.578251 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/ded11e77-4b8a-4208-a427-7656b31b9ba5-systemd-units\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.578265 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ded11e77-4b8a-4208-a427-7656b31b9ba5-host-run-ovn-kubernetes\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.578285 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/ded11e77-4b8a-4208-a427-7656b31b9ba5-run-systemd\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.578367 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/ded11e77-4b8a-4208-a427-7656b31b9ba5-ovn-node-metrics-cert\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.578389 4783 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/ded11e77-4b8a-4208-a427-7656b31b9ba5-host-cni-bin\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.578406 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ded11e77-4b8a-4208-a427-7656b31b9ba5-etc-openvswitch\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.578443 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ded11e77-4b8a-4208-a427-7656b31b9ba5-env-overrides\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.578482 4783 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/095cdcdf-1ea0-40da-871a-1223c6737377-ovnkube-config\") on node \"crc\" DevicePath \"\"" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.578492 4783 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-log-socket\") on node \"crc\" DevicePath \"\"" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.578501 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hhvtk\" (UniqueName: \"kubernetes.io/projected/095cdcdf-1ea0-40da-871a-1223c6737377-kube-api-access-hhvtk\") on node \"crc\" DevicePath \"\"" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.578510 4783 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-run-ovn\") on node \"crc\" DevicePath \"\"" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.578519 4783 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-run-systemd\") on node \"crc\" DevicePath \"\"" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.578528 4783 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/095cdcdf-1ea0-40da-871a-1223c6737377-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.578536 4783 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-systemd-units\") on node \"crc\" DevicePath \"\"" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.578544 4783 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-host-kubelet\") on node \"crc\" DevicePath \"\"" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.578552 4783 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/095cdcdf-1ea0-40da-871a-1223c6737377-node-log\") on node \"crc\" DevicePath \"\"" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 
11:05:52.641640 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-wmn4g_f6c8d5bc-163f-401f-bdc5-4625112dced9/kube-multus/2.log" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.642145 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-wmn4g_f6c8d5bc-163f-401f-bdc5-4625112dced9/kube-multus/1.log" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.642216 4783 generic.go:334] "Generic (PLEG): container finished" podID="f6c8d5bc-163f-401f-bdc5-4625112dced9" containerID="5e9a142014f6188db79c661039a5fb0036ea5e97daddbfbe9d12633dbdba8daa" exitCode=2 Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.642320 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-wmn4g" event={"ID":"f6c8d5bc-163f-401f-bdc5-4625112dced9","Type":"ContainerDied","Data":"5e9a142014f6188db79c661039a5fb0036ea5e97daddbfbe9d12633dbdba8daa"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.642397 4783 scope.go:117] "RemoveContainer" containerID="b082bad7fea22dc168e5afb29973bf16e160c19b67918df47835b6ffa8ce7d1b" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.643049 4783 scope.go:117] "RemoveContainer" containerID="5e9a142014f6188db79c661039a5fb0036ea5e97daddbfbe9d12633dbdba8daa" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.644739 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qmd84_095cdcdf-1ea0-40da-871a-1223c6737377/ovnkube-controller/4.log" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.646184 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qmd84_095cdcdf-1ea0-40da-871a-1223c6737377/ovnkube-controller/3.log" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.649036 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qmd84_095cdcdf-1ea0-40da-871a-1223c6737377/ovn-acl-logging/0.log" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.649714 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-qmd84_095cdcdf-1ea0-40da-871a-1223c6737377/ovn-controller/0.log" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.650166 4783 generic.go:334] "Generic (PLEG): container finished" podID="095cdcdf-1ea0-40da-871a-1223c6737377" containerID="6b3cc167cd7b063c3bc27babf6fb353fa69b70a0594d01dae62b30d182f97ad5" exitCode=2 Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.650189 4783 generic.go:334] "Generic (PLEG): container finished" podID="095cdcdf-1ea0-40da-871a-1223c6737377" containerID="ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f" exitCode=0 Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.650197 4783 generic.go:334] "Generic (PLEG): container finished" podID="095cdcdf-1ea0-40da-871a-1223c6737377" containerID="a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3" exitCode=0 Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.650205 4783 generic.go:334] "Generic (PLEG): container finished" podID="095cdcdf-1ea0-40da-871a-1223c6737377" containerID="38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d" exitCode=0 Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.650213 4783 generic.go:334] "Generic (PLEG): container finished" podID="095cdcdf-1ea0-40da-871a-1223c6737377" containerID="e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b" exitCode=0 Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 
11:05:52.650220 4783 generic.go:334] "Generic (PLEG): container finished" podID="095cdcdf-1ea0-40da-871a-1223c6737377" containerID="9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e" exitCode=0 Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.650227 4783 generic.go:334] "Generic (PLEG): container finished" podID="095cdcdf-1ea0-40da-871a-1223c6737377" containerID="abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861" exitCode=143 Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.650235 4783 generic.go:334] "Generic (PLEG): container finished" podID="095cdcdf-1ea0-40da-871a-1223c6737377" containerID="cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e" exitCode=143 Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.650358 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.650639 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" event={"ID":"095cdcdf-1ea0-40da-871a-1223c6737377","Type":"ContainerDied","Data":"6b3cc167cd7b063c3bc27babf6fb353fa69b70a0594d01dae62b30d182f97ad5"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.650748 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" event={"ID":"095cdcdf-1ea0-40da-871a-1223c6737377","Type":"ContainerDied","Data":"ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.650839 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" event={"ID":"095cdcdf-1ea0-40da-871a-1223c6737377","Type":"ContainerDied","Data":"a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.650927 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" event={"ID":"095cdcdf-1ea0-40da-871a-1223c6737377","Type":"ContainerDied","Data":"38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.651050 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" event={"ID":"095cdcdf-1ea0-40da-871a-1223c6737377","Type":"ContainerDied","Data":"e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.651153 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" event={"ID":"095cdcdf-1ea0-40da-871a-1223c6737377","Type":"ContainerDied","Data":"9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.651232 4783 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6b3cc167cd7b063c3bc27babf6fb353fa69b70a0594d01dae62b30d182f97ad5"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.651304 4783 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"28a2dfa47c9574c9dbe9d258a082c63f6430fc57d7bf234a4453042d2026460a"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.652529 4783 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.652548 4783 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.652557 4783 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.652565 4783 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.652574 4783 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.652581 4783 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.652589 4783 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.652595 4783 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.652609 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" event={"ID":"095cdcdf-1ea0-40da-871a-1223c6737377","Type":"ContainerDied","Data":"abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.652623 4783 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6b3cc167cd7b063c3bc27babf6fb353fa69b70a0594d01dae62b30d182f97ad5"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.652633 4783 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"28a2dfa47c9574c9dbe9d258a082c63f6430fc57d7bf234a4453042d2026460a"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.652641 4783 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.652648 4783 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.652656 4783 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.652664 4783 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.652672 4783 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.652679 4783 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.652686 4783 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.652695 4783 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.652707 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" event={"ID":"095cdcdf-1ea0-40da-871a-1223c6737377","Type":"ContainerDied","Data":"cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.652718 4783 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6b3cc167cd7b063c3bc27babf6fb353fa69b70a0594d01dae62b30d182f97ad5"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.652727 4783 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"28a2dfa47c9574c9dbe9d258a082c63f6430fc57d7bf234a4453042d2026460a"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.652734 4783 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.652741 4783 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.652749 4783 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.652756 4783 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.652763 4783 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.652770 4783 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.652778 4783 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.652786 4783 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.652797 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-qmd84" event={"ID":"095cdcdf-1ea0-40da-871a-1223c6737377","Type":"ContainerDied","Data":"ed4b10c90beeaf12a5f099fbd95c1695e285d247972b79106d428ace9bea2d86"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.652808 4783 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"6b3cc167cd7b063c3bc27babf6fb353fa69b70a0594d01dae62b30d182f97ad5"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.652818 4783 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"28a2dfa47c9574c9dbe9d258a082c63f6430fc57d7bf234a4453042d2026460a"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.652825 4783 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.652833 4783 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.652841 4783 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.652849 4783 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.652857 4783 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.652863 4783 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.652870 4783 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.652878 4783 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.656442 4783 generic.go:334] "Generic (PLEG): container finished" podID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerID="3d77624023f180014e3aae2d3836e37ae2fefe61ff0ccee775ec35e399cb240c" exitCode=0 Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.656487 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" event={"ID":"3288cc82-59a8-408e-8b0e-b5255882b4fb","Type":"ContainerDied","Data":"3d77624023f180014e3aae2d3836e37ae2fefe61ff0ccee775ec35e399cb240c"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.656514 4783 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"c7b837b3146fd661f532d6de35193ba6facf1c1681e01ea3c274bb1093a5666b"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.656537 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" event={"ID":"3288cc82-59a8-408e-8b0e-b5255882b4fb","Type":"ContainerStarted","Data":"71430381fae101618ae61e001dc6e16fd7ea79336b50af0d653500a93eaed8e8"} Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.679407 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ded11e77-4b8a-4208-a427-7656b31b9ba5-run-openvswitch\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.679484 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/ded11e77-4b8a-4208-a427-7656b31b9ba5-host-slash\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.679507 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/ded11e77-4b8a-4208-a427-7656b31b9ba5-log-socket\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.679528 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/ded11e77-4b8a-4208-a427-7656b31b9ba5-systemd-units\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.679533 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ded11e77-4b8a-4208-a427-7656b31b9ba5-run-openvswitch\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.679547 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ded11e77-4b8a-4208-a427-7656b31b9ba5-host-run-ovn-kubernetes\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.679590 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ded11e77-4b8a-4208-a427-7656b31b9ba5-host-run-ovn-kubernetes\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 
11:05:52.679629 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/ded11e77-4b8a-4208-a427-7656b31b9ba5-run-systemd\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.679638 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/ded11e77-4b8a-4208-a427-7656b31b9ba5-host-slash\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.679657 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/ded11e77-4b8a-4208-a427-7656b31b9ba5-systemd-units\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.679739 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/ded11e77-4b8a-4208-a427-7656b31b9ba5-log-socket\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.679771 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/ded11e77-4b8a-4208-a427-7656b31b9ba5-run-systemd\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.679804 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/ded11e77-4b8a-4208-a427-7656b31b9ba5-ovn-node-metrics-cert\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.679835 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/ded11e77-4b8a-4208-a427-7656b31b9ba5-host-cni-bin\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.679920 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/ded11e77-4b8a-4208-a427-7656b31b9ba5-host-cni-bin\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.679979 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ded11e77-4b8a-4208-a427-7656b31b9ba5-etc-openvswitch\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.680024 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: 
\"kubernetes.io/configmap/ded11e77-4b8a-4208-a427-7656b31b9ba5-env-overrides\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.680082 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/ded11e77-4b8a-4208-a427-7656b31b9ba5-ovnkube-script-lib\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.680111 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8zhbj\" (UniqueName: \"kubernetes.io/projected/ded11e77-4b8a-4208-a427-7656b31b9ba5-kube-api-access-8zhbj\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.680158 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/ded11e77-4b8a-4208-a427-7656b31b9ba5-run-ovn\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.680200 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ded11e77-4b8a-4208-a427-7656b31b9ba5-var-lib-openvswitch\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.680272 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ded11e77-4b8a-4208-a427-7656b31b9ba5-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.680309 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/ded11e77-4b8a-4208-a427-7656b31b9ba5-host-run-netns\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.680338 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/ded11e77-4b8a-4208-a427-7656b31b9ba5-host-cni-netd\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.680386 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/ded11e77-4b8a-4208-a427-7656b31b9ba5-ovnkube-config\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.680460 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: 
\"kubernetes.io/host-path/ded11e77-4b8a-4208-a427-7656b31b9ba5-run-ovn\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.680496 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ded11e77-4b8a-4208-a427-7656b31b9ba5-etc-openvswitch\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.680513 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/ded11e77-4b8a-4208-a427-7656b31b9ba5-var-lib-openvswitch\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.680768 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/ded11e77-4b8a-4208-a427-7656b31b9ba5-host-run-netns\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.680800 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/ded11e77-4b8a-4208-a427-7656b31b9ba5-host-cni-netd\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.681362 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ded11e77-4b8a-4208-a427-7656b31b9ba5-env-overrides\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.681369 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/ded11e77-4b8a-4208-a427-7656b31b9ba5-ovnkube-script-lib\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.681403 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/ded11e77-4b8a-4208-a427-7656b31b9ba5-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.681501 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/ded11e77-4b8a-4208-a427-7656b31b9ba5-node-log\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.681540 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/ded11e77-4b8a-4208-a427-7656b31b9ba5-host-kubelet\") pod \"ovnkube-node-f6ws2\" 
(UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.681598 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/ded11e77-4b8a-4208-a427-7656b31b9ba5-ovnkube-config\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.681637 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/ded11e77-4b8a-4208-a427-7656b31b9ba5-node-log\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.681636 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/ded11e77-4b8a-4208-a427-7656b31b9ba5-host-kubelet\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.694391 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/ded11e77-4b8a-4208-a427-7656b31b9ba5-ovn-node-metrics-cert\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.695614 4783 scope.go:117] "RemoveContainer" containerID="6b3cc167cd7b063c3bc27babf6fb353fa69b70a0594d01dae62b30d182f97ad5" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.702064 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8zhbj\" (UniqueName: \"kubernetes.io/projected/ded11e77-4b8a-4208-a427-7656b31b9ba5-kube-api-access-8zhbj\") pod \"ovnkube-node-f6ws2\" (UID: \"ded11e77-4b8a-4208-a427-7656b31b9ba5\") " pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.745767 4783 scope.go:117] "RemoveContainer" containerID="28a2dfa47c9574c9dbe9d258a082c63f6430fc57d7bf234a4453042d2026460a" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.747237 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.751284 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-qmd84"] Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.755934 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-qmd84"] Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.769218 4783 scope.go:117] "RemoveContainer" containerID="ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f" Oct 02 11:05:52 crc kubenswrapper[4783]: W1002 11:05:52.773294 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podded11e77_4b8a_4208_a427_7656b31b9ba5.slice/crio-ba142c0f673363f14c0b186ea7c0bebd726005e6b2e2a2ece9d43aa32db36225 WatchSource:0}: Error finding container ba142c0f673363f14c0b186ea7c0bebd726005e6b2e2a2ece9d43aa32db36225: Status 404 returned error can't find the container with id ba142c0f673363f14c0b186ea7c0bebd726005e6b2e2a2ece9d43aa32db36225 Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.797609 4783 scope.go:117] "RemoveContainer" containerID="a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.812997 4783 scope.go:117] "RemoveContainer" containerID="38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.843594 4783 scope.go:117] "RemoveContainer" containerID="e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.858753 4783 scope.go:117] "RemoveContainer" containerID="9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.873486 4783 scope.go:117] "RemoveContainer" containerID="abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.891831 4783 scope.go:117] "RemoveContainer" containerID="cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.954744 4783 scope.go:117] "RemoveContainer" containerID="3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.972360 4783 scope.go:117] "RemoveContainer" containerID="6b3cc167cd7b063c3bc27babf6fb353fa69b70a0594d01dae62b30d182f97ad5" Oct 02 11:05:52 crc kubenswrapper[4783]: E1002 11:05:52.973027 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6b3cc167cd7b063c3bc27babf6fb353fa69b70a0594d01dae62b30d182f97ad5\": container with ID starting with 6b3cc167cd7b063c3bc27babf6fb353fa69b70a0594d01dae62b30d182f97ad5 not found: ID does not exist" containerID="6b3cc167cd7b063c3bc27babf6fb353fa69b70a0594d01dae62b30d182f97ad5" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.973075 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6b3cc167cd7b063c3bc27babf6fb353fa69b70a0594d01dae62b30d182f97ad5"} err="failed to get container status \"6b3cc167cd7b063c3bc27babf6fb353fa69b70a0594d01dae62b30d182f97ad5\": rpc error: code = NotFound desc = could not find container \"6b3cc167cd7b063c3bc27babf6fb353fa69b70a0594d01dae62b30d182f97ad5\": container with ID starting with 
6b3cc167cd7b063c3bc27babf6fb353fa69b70a0594d01dae62b30d182f97ad5 not found: ID does not exist" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.973107 4783 scope.go:117] "RemoveContainer" containerID="28a2dfa47c9574c9dbe9d258a082c63f6430fc57d7bf234a4453042d2026460a" Oct 02 11:05:52 crc kubenswrapper[4783]: E1002 11:05:52.973531 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"28a2dfa47c9574c9dbe9d258a082c63f6430fc57d7bf234a4453042d2026460a\": container with ID starting with 28a2dfa47c9574c9dbe9d258a082c63f6430fc57d7bf234a4453042d2026460a not found: ID does not exist" containerID="28a2dfa47c9574c9dbe9d258a082c63f6430fc57d7bf234a4453042d2026460a" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.973588 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28a2dfa47c9574c9dbe9d258a082c63f6430fc57d7bf234a4453042d2026460a"} err="failed to get container status \"28a2dfa47c9574c9dbe9d258a082c63f6430fc57d7bf234a4453042d2026460a\": rpc error: code = NotFound desc = could not find container \"28a2dfa47c9574c9dbe9d258a082c63f6430fc57d7bf234a4453042d2026460a\": container with ID starting with 28a2dfa47c9574c9dbe9d258a082c63f6430fc57d7bf234a4453042d2026460a not found: ID does not exist" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.973619 4783 scope.go:117] "RemoveContainer" containerID="ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f" Oct 02 11:05:52 crc kubenswrapper[4783]: E1002 11:05:52.974332 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f\": container with ID starting with ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f not found: ID does not exist" containerID="ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.974364 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f"} err="failed to get container status \"ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f\": rpc error: code = NotFound desc = could not find container \"ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f\": container with ID starting with ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f not found: ID does not exist" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.974383 4783 scope.go:117] "RemoveContainer" containerID="a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3" Oct 02 11:05:52 crc kubenswrapper[4783]: E1002 11:05:52.975022 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3\": container with ID starting with a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3 not found: ID does not exist" containerID="a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.975054 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3"} err="failed to get container status \"a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3\": rpc 
error: code = NotFound desc = could not find container \"a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3\": container with ID starting with a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3 not found: ID does not exist" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.975072 4783 scope.go:117] "RemoveContainer" containerID="38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d" Oct 02 11:05:52 crc kubenswrapper[4783]: E1002 11:05:52.975454 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d\": container with ID starting with 38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d not found: ID does not exist" containerID="38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.975515 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d"} err="failed to get container status \"38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d\": rpc error: code = NotFound desc = could not find container \"38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d\": container with ID starting with 38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d not found: ID does not exist" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.975559 4783 scope.go:117] "RemoveContainer" containerID="e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b" Oct 02 11:05:52 crc kubenswrapper[4783]: E1002 11:05:52.975897 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b\": container with ID starting with e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b not found: ID does not exist" containerID="e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.975927 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b"} err="failed to get container status \"e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b\": rpc error: code = NotFound desc = could not find container \"e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b\": container with ID starting with e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b not found: ID does not exist" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.975946 4783 scope.go:117] "RemoveContainer" containerID="9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e" Oct 02 11:05:52 crc kubenswrapper[4783]: E1002 11:05:52.976212 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e\": container with ID starting with 9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e not found: ID does not exist" containerID="9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.976238 4783 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e"} err="failed to get container status \"9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e\": rpc error: code = NotFound desc = could not find container \"9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e\": container with ID starting with 9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e not found: ID does not exist" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.976256 4783 scope.go:117] "RemoveContainer" containerID="abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861" Oct 02 11:05:52 crc kubenswrapper[4783]: E1002 11:05:52.976535 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861\": container with ID starting with abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861 not found: ID does not exist" containerID="abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.976565 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861"} err="failed to get container status \"abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861\": rpc error: code = NotFound desc = could not find container \"abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861\": container with ID starting with abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861 not found: ID does not exist" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.976584 4783 scope.go:117] "RemoveContainer" containerID="cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e" Oct 02 11:05:52 crc kubenswrapper[4783]: E1002 11:05:52.977204 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e\": container with ID starting with cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e not found: ID does not exist" containerID="cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.977234 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e"} err="failed to get container status \"cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e\": rpc error: code = NotFound desc = could not find container \"cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e\": container with ID starting with cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e not found: ID does not exist" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.977253 4783 scope.go:117] "RemoveContainer" containerID="3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54" Oct 02 11:05:52 crc kubenswrapper[4783]: E1002 11:05:52.977602 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\": container with ID starting with 3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54 not found: ID does not exist" 
containerID="3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.977644 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54"} err="failed to get container status \"3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\": rpc error: code = NotFound desc = could not find container \"3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\": container with ID starting with 3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54 not found: ID does not exist" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.977678 4783 scope.go:117] "RemoveContainer" containerID="6b3cc167cd7b063c3bc27babf6fb353fa69b70a0594d01dae62b30d182f97ad5" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.978069 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6b3cc167cd7b063c3bc27babf6fb353fa69b70a0594d01dae62b30d182f97ad5"} err="failed to get container status \"6b3cc167cd7b063c3bc27babf6fb353fa69b70a0594d01dae62b30d182f97ad5\": rpc error: code = NotFound desc = could not find container \"6b3cc167cd7b063c3bc27babf6fb353fa69b70a0594d01dae62b30d182f97ad5\": container with ID starting with 6b3cc167cd7b063c3bc27babf6fb353fa69b70a0594d01dae62b30d182f97ad5 not found: ID does not exist" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.978098 4783 scope.go:117] "RemoveContainer" containerID="28a2dfa47c9574c9dbe9d258a082c63f6430fc57d7bf234a4453042d2026460a" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.978359 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28a2dfa47c9574c9dbe9d258a082c63f6430fc57d7bf234a4453042d2026460a"} err="failed to get container status \"28a2dfa47c9574c9dbe9d258a082c63f6430fc57d7bf234a4453042d2026460a\": rpc error: code = NotFound desc = could not find container \"28a2dfa47c9574c9dbe9d258a082c63f6430fc57d7bf234a4453042d2026460a\": container with ID starting with 28a2dfa47c9574c9dbe9d258a082c63f6430fc57d7bf234a4453042d2026460a not found: ID does not exist" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.978388 4783 scope.go:117] "RemoveContainer" containerID="ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.978717 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f"} err="failed to get container status \"ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f\": rpc error: code = NotFound desc = could not find container \"ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f\": container with ID starting with ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f not found: ID does not exist" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.978759 4783 scope.go:117] "RemoveContainer" containerID="a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.980617 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3"} err="failed to get container status \"a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3\": rpc error: code = NotFound desc = could not find 
container \"a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3\": container with ID starting with a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3 not found: ID does not exist" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.980674 4783 scope.go:117] "RemoveContainer" containerID="38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.981097 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d"} err="failed to get container status \"38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d\": rpc error: code = NotFound desc = could not find container \"38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d\": container with ID starting with 38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d not found: ID does not exist" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.981128 4783 scope.go:117] "RemoveContainer" containerID="e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.981430 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b"} err="failed to get container status \"e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b\": rpc error: code = NotFound desc = could not find container \"e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b\": container with ID starting with e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b not found: ID does not exist" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.981458 4783 scope.go:117] "RemoveContainer" containerID="9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.981717 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e"} err="failed to get container status \"9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e\": rpc error: code = NotFound desc = could not find container \"9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e\": container with ID starting with 9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e not found: ID does not exist" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.981745 4783 scope.go:117] "RemoveContainer" containerID="abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.981971 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861"} err="failed to get container status \"abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861\": rpc error: code = NotFound desc = could not find container \"abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861\": container with ID starting with abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861 not found: ID does not exist" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.981996 4783 scope.go:117] "RemoveContainer" containerID="cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.982292 4783 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e"} err="failed to get container status \"cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e\": rpc error: code = NotFound desc = could not find container \"cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e\": container with ID starting with cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e not found: ID does not exist" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.982334 4783 scope.go:117] "RemoveContainer" containerID="3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.982730 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54"} err="failed to get container status \"3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\": rpc error: code = NotFound desc = could not find container \"3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\": container with ID starting with 3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54 not found: ID does not exist" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.982758 4783 scope.go:117] "RemoveContainer" containerID="6b3cc167cd7b063c3bc27babf6fb353fa69b70a0594d01dae62b30d182f97ad5" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.983293 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6b3cc167cd7b063c3bc27babf6fb353fa69b70a0594d01dae62b30d182f97ad5"} err="failed to get container status \"6b3cc167cd7b063c3bc27babf6fb353fa69b70a0594d01dae62b30d182f97ad5\": rpc error: code = NotFound desc = could not find container \"6b3cc167cd7b063c3bc27babf6fb353fa69b70a0594d01dae62b30d182f97ad5\": container with ID starting with 6b3cc167cd7b063c3bc27babf6fb353fa69b70a0594d01dae62b30d182f97ad5 not found: ID does not exist" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.983320 4783 scope.go:117] "RemoveContainer" containerID="28a2dfa47c9574c9dbe9d258a082c63f6430fc57d7bf234a4453042d2026460a" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.983639 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28a2dfa47c9574c9dbe9d258a082c63f6430fc57d7bf234a4453042d2026460a"} err="failed to get container status \"28a2dfa47c9574c9dbe9d258a082c63f6430fc57d7bf234a4453042d2026460a\": rpc error: code = NotFound desc = could not find container \"28a2dfa47c9574c9dbe9d258a082c63f6430fc57d7bf234a4453042d2026460a\": container with ID starting with 28a2dfa47c9574c9dbe9d258a082c63f6430fc57d7bf234a4453042d2026460a not found: ID does not exist" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.983679 4783 scope.go:117] "RemoveContainer" containerID="ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.984223 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f"} err="failed to get container status \"ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f\": rpc error: code = NotFound desc = could not find container \"ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f\": container with ID starting with 
ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f not found: ID does not exist" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.984248 4783 scope.go:117] "RemoveContainer" containerID="a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.984566 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3"} err="failed to get container status \"a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3\": rpc error: code = NotFound desc = could not find container \"a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3\": container with ID starting with a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3 not found: ID does not exist" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.984603 4783 scope.go:117] "RemoveContainer" containerID="38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.984930 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d"} err="failed to get container status \"38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d\": rpc error: code = NotFound desc = could not find container \"38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d\": container with ID starting with 38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d not found: ID does not exist" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.984979 4783 scope.go:117] "RemoveContainer" containerID="e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.985237 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b"} err="failed to get container status \"e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b\": rpc error: code = NotFound desc = could not find container \"e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b\": container with ID starting with e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b not found: ID does not exist" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.985273 4783 scope.go:117] "RemoveContainer" containerID="9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.985627 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e"} err="failed to get container status \"9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e\": rpc error: code = NotFound desc = could not find container \"9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e\": container with ID starting with 9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e not found: ID does not exist" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.985653 4783 scope.go:117] "RemoveContainer" containerID="abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.985874 4783 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861"} err="failed to get container status \"abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861\": rpc error: code = NotFound desc = could not find container \"abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861\": container with ID starting with abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861 not found: ID does not exist" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.985904 4783 scope.go:117] "RemoveContainer" containerID="cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.986163 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e"} err="failed to get container status \"cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e\": rpc error: code = NotFound desc = could not find container \"cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e\": container with ID starting with cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e not found: ID does not exist" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.986188 4783 scope.go:117] "RemoveContainer" containerID="3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.986394 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54"} err="failed to get container status \"3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\": rpc error: code = NotFound desc = could not find container \"3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\": container with ID starting with 3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54 not found: ID does not exist" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.986441 4783 scope.go:117] "RemoveContainer" containerID="6b3cc167cd7b063c3bc27babf6fb353fa69b70a0594d01dae62b30d182f97ad5" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.987140 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6b3cc167cd7b063c3bc27babf6fb353fa69b70a0594d01dae62b30d182f97ad5"} err="failed to get container status \"6b3cc167cd7b063c3bc27babf6fb353fa69b70a0594d01dae62b30d182f97ad5\": rpc error: code = NotFound desc = could not find container \"6b3cc167cd7b063c3bc27babf6fb353fa69b70a0594d01dae62b30d182f97ad5\": container with ID starting with 6b3cc167cd7b063c3bc27babf6fb353fa69b70a0594d01dae62b30d182f97ad5 not found: ID does not exist" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.987165 4783 scope.go:117] "RemoveContainer" containerID="28a2dfa47c9574c9dbe9d258a082c63f6430fc57d7bf234a4453042d2026460a" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.987477 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28a2dfa47c9574c9dbe9d258a082c63f6430fc57d7bf234a4453042d2026460a"} err="failed to get container status \"28a2dfa47c9574c9dbe9d258a082c63f6430fc57d7bf234a4453042d2026460a\": rpc error: code = NotFound desc = could not find container \"28a2dfa47c9574c9dbe9d258a082c63f6430fc57d7bf234a4453042d2026460a\": container with ID starting with 28a2dfa47c9574c9dbe9d258a082c63f6430fc57d7bf234a4453042d2026460a not found: ID does not exist" Oct 
02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.987501 4783 scope.go:117] "RemoveContainer" containerID="ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.987753 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f"} err="failed to get container status \"ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f\": rpc error: code = NotFound desc = could not find container \"ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f\": container with ID starting with ee4d20dda995592fd5cb04d3e66d9cd14bcd750247f1465ecf6ad63dfc83e18f not found: ID does not exist" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.987777 4783 scope.go:117] "RemoveContainer" containerID="a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.988035 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3"} err="failed to get container status \"a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3\": rpc error: code = NotFound desc = could not find container \"a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3\": container with ID starting with a53de1205b28ca9f406f1d59b1292c77aae96e4ca16e025cfbf94a67e7f8a4f3 not found: ID does not exist" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.988056 4783 scope.go:117] "RemoveContainer" containerID="38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.988273 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d"} err="failed to get container status \"38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d\": rpc error: code = NotFound desc = could not find container \"38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d\": container with ID starting with 38dbf15399d60f9eafca79bfdc734a04a781f767147d3d50de668a33bb898f6d not found: ID does not exist" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.988292 4783 scope.go:117] "RemoveContainer" containerID="e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.988540 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b"} err="failed to get container status \"e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b\": rpc error: code = NotFound desc = could not find container \"e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b\": container with ID starting with e811177dafad3362841385382525ed80a5ba64d537a4ba9bd2667fbdeb53a18b not found: ID does not exist" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.988570 4783 scope.go:117] "RemoveContainer" containerID="9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.988862 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e"} err="failed to get container status 
\"9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e\": rpc error: code = NotFound desc = could not find container \"9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e\": container with ID starting with 9e3a605738b7c436c09b9a85d0ef5bcfdf32371dd82193ee9169e3b91793624e not found: ID does not exist" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.988890 4783 scope.go:117] "RemoveContainer" containerID="abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.989182 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861"} err="failed to get container status \"abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861\": rpc error: code = NotFound desc = could not find container \"abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861\": container with ID starting with abcfe55f4beddb6cb3dd45e76d570806ea3104629cf90b58a953bd493e9d4861 not found: ID does not exist" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.989227 4783 scope.go:117] "RemoveContainer" containerID="cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.990199 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e"} err="failed to get container status \"cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e\": rpc error: code = NotFound desc = could not find container \"cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e\": container with ID starting with cde1412f2a776cda0c90feb986be7f21c6b066948857766cdf7a7f739a3cab8e not found: ID does not exist" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.990226 4783 scope.go:117] "RemoveContainer" containerID="3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54" Oct 02 11:05:52 crc kubenswrapper[4783]: I1002 11:05:52.990481 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54"} err="failed to get container status \"3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\": rpc error: code = NotFound desc = could not find container \"3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54\": container with ID starting with 3c978bda9369fbc566124b64a392ae577a781cbf041f5cc5e34ede79c73bea54 not found: ID does not exist" Oct 02 11:05:53 crc kubenswrapper[4783]: I1002 11:05:53.555719 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="095cdcdf-1ea0-40da-871a-1223c6737377" path="/var/lib/kubelet/pods/095cdcdf-1ea0-40da-871a-1223c6737377/volumes" Oct 02 11:05:53 crc kubenswrapper[4783]: I1002 11:05:53.665901 4783 generic.go:334] "Generic (PLEG): container finished" podID="ded11e77-4b8a-4208-a427-7656b31b9ba5" containerID="37786e85421707ff5e9fc35c7a392a1e1a8af81778ecdaa384b184f67914ec7a" exitCode=0 Oct 02 11:05:53 crc kubenswrapper[4783]: I1002 11:05:53.665988 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" event={"ID":"ded11e77-4b8a-4208-a427-7656b31b9ba5","Type":"ContainerDied","Data":"37786e85421707ff5e9fc35c7a392a1e1a8af81778ecdaa384b184f67914ec7a"} Oct 02 11:05:53 crc kubenswrapper[4783]: I1002 11:05:53.666036 4783 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" event={"ID":"ded11e77-4b8a-4208-a427-7656b31b9ba5","Type":"ContainerStarted","Data":"ba142c0f673363f14c0b186ea7c0bebd726005e6b2e2a2ece9d43aa32db36225"} Oct 02 11:05:53 crc kubenswrapper[4783]: I1002 11:05:53.668277 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-wmn4g_f6c8d5bc-163f-401f-bdc5-4625112dced9/kube-multus/2.log" Oct 02 11:05:53 crc kubenswrapper[4783]: I1002 11:05:53.668331 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-wmn4g" event={"ID":"f6c8d5bc-163f-401f-bdc5-4625112dced9","Type":"ContainerStarted","Data":"c3c2014b5babe3d04e2fc4f9698df950fb78d0c9dce3c860491ff4ec590a12a0"} Oct 02 11:05:54 crc kubenswrapper[4783]: I1002 11:05:54.680582 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" event={"ID":"ded11e77-4b8a-4208-a427-7656b31b9ba5","Type":"ContainerStarted","Data":"0ed2ccf3789161dd0abb6f701f7ec96581d4b22a1d0211ebe396292e48a198a0"} Oct 02 11:05:54 crc kubenswrapper[4783]: I1002 11:05:54.680985 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" event={"ID":"ded11e77-4b8a-4208-a427-7656b31b9ba5","Type":"ContainerStarted","Data":"3f18dc92c12d166c2f70fa45b5453522af867abe9b2beb5e8dd17c2ad095d259"} Oct 02 11:05:54 crc kubenswrapper[4783]: I1002 11:05:54.681004 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" event={"ID":"ded11e77-4b8a-4208-a427-7656b31b9ba5","Type":"ContainerStarted","Data":"4d6560aac496a794c53b778c6233c09a8c82ecdfe7e8254f2d08f010137f53c4"} Oct 02 11:05:54 crc kubenswrapper[4783]: I1002 11:05:54.681023 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" event={"ID":"ded11e77-4b8a-4208-a427-7656b31b9ba5","Type":"ContainerStarted","Data":"166409429f45def3b39051ecf84935dc5b93c9df5bbd919a242a13112620bf0c"} Oct 02 11:05:54 crc kubenswrapper[4783]: I1002 11:05:54.681040 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" event={"ID":"ded11e77-4b8a-4208-a427-7656b31b9ba5","Type":"ContainerStarted","Data":"6e548befd0cdb3dd49a04c89f75a0eba8ae93fe75d1d8046bada0800f5146262"} Oct 02 11:05:54 crc kubenswrapper[4783]: I1002 11:05:54.681059 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" event={"ID":"ded11e77-4b8a-4208-a427-7656b31b9ba5","Type":"ContainerStarted","Data":"e9e77bbdfcb75d99d5c122e636b27e1ba65bb05d49029ea47fe85b26d9ec2415"} Oct 02 11:05:56 crc kubenswrapper[4783]: I1002 11:05:56.702086 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" event={"ID":"ded11e77-4b8a-4208-a427-7656b31b9ba5","Type":"ContainerStarted","Data":"14de69dedd99e9c8c85bd6d00835c185903a403d363eb7adedce12201562e63b"} Oct 02 11:05:59 crc kubenswrapper[4783]: I1002 11:05:59.738440 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" event={"ID":"ded11e77-4b8a-4208-a427-7656b31b9ba5","Type":"ContainerStarted","Data":"36a05bba6df3daa65860425366457fbd775057e97b2d060caefddc7b5d40ff36"} Oct 02 11:05:59 crc kubenswrapper[4783]: I1002 11:05:59.738709 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:59 crc kubenswrapper[4783]: I1002 
11:05:59.738726 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:59 crc kubenswrapper[4783]: I1002 11:05:59.738739 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:59 crc kubenswrapper[4783]: I1002 11:05:59.763825 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:59 crc kubenswrapper[4783]: I1002 11:05:59.764304 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:05:59 crc kubenswrapper[4783]: I1002 11:05:59.772303 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" podStartSLOduration=7.7722889429999995 podStartE2EDuration="7.772288943s" podCreationTimestamp="2025-10-02 11:05:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:05:59.770091103 +0000 UTC m=+793.086285374" watchObservedRunningTime="2025-10-02 11:05:59.772288943 +0000 UTC m=+793.088483204" Oct 02 11:06:22 crc kubenswrapper[4783]: I1002 11:06:22.775341 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-f6ws2" Oct 02 11:06:32 crc kubenswrapper[4783]: I1002 11:06:32.040040 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c4hstx"] Oct 02 11:06:32 crc kubenswrapper[4783]: I1002 11:06:32.042320 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c4hstx" Oct 02 11:06:32 crc kubenswrapper[4783]: I1002 11:06:32.046545 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Oct 02 11:06:32 crc kubenswrapper[4783]: I1002 11:06:32.052450 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c4hstx"] Oct 02 11:06:32 crc kubenswrapper[4783]: I1002 11:06:32.095481 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7m9h9\" (UniqueName: \"kubernetes.io/projected/09df6785-86ca-4bd5-958d-931c38c75084-kube-api-access-7m9h9\") pod \"fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c4hstx\" (UID: \"09df6785-86ca-4bd5-958d-931c38c75084\") " pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c4hstx" Oct 02 11:06:32 crc kubenswrapper[4783]: I1002 11:06:32.095527 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/09df6785-86ca-4bd5-958d-931c38c75084-bundle\") pod \"fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c4hstx\" (UID: \"09df6785-86ca-4bd5-958d-931c38c75084\") " pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c4hstx" Oct 02 11:06:32 crc kubenswrapper[4783]: I1002 11:06:32.095608 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/09df6785-86ca-4bd5-958d-931c38c75084-util\") pod \"fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c4hstx\" (UID: \"09df6785-86ca-4bd5-958d-931c38c75084\") " pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c4hstx" Oct 02 11:06:32 crc kubenswrapper[4783]: I1002 11:06:32.196736 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/09df6785-86ca-4bd5-958d-931c38c75084-util\") pod \"fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c4hstx\" (UID: \"09df6785-86ca-4bd5-958d-931c38c75084\") " pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c4hstx" Oct 02 11:06:32 crc kubenswrapper[4783]: I1002 11:06:32.196818 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7m9h9\" (UniqueName: \"kubernetes.io/projected/09df6785-86ca-4bd5-958d-931c38c75084-kube-api-access-7m9h9\") pod \"fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c4hstx\" (UID: \"09df6785-86ca-4bd5-958d-931c38c75084\") " pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c4hstx" Oct 02 11:06:32 crc kubenswrapper[4783]: I1002 11:06:32.196855 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/09df6785-86ca-4bd5-958d-931c38c75084-bundle\") pod \"fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c4hstx\" (UID: \"09df6785-86ca-4bd5-958d-931c38c75084\") " pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c4hstx" Oct 02 11:06:32 crc kubenswrapper[4783]: I1002 11:06:32.198186 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/09df6785-86ca-4bd5-958d-931c38c75084-bundle\") pod \"fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c4hstx\" (UID: \"09df6785-86ca-4bd5-958d-931c38c75084\") " pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c4hstx" Oct 02 11:06:32 crc kubenswrapper[4783]: I1002 11:06:32.198190 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/09df6785-86ca-4bd5-958d-931c38c75084-util\") pod \"fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c4hstx\" (UID: \"09df6785-86ca-4bd5-958d-931c38c75084\") " pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c4hstx" Oct 02 11:06:32 crc kubenswrapper[4783]: I1002 11:06:32.221383 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7m9h9\" (UniqueName: \"kubernetes.io/projected/09df6785-86ca-4bd5-958d-931c38c75084-kube-api-access-7m9h9\") pod \"fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c4hstx\" (UID: \"09df6785-86ca-4bd5-958d-931c38c75084\") " pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c4hstx" Oct 02 11:06:32 crc kubenswrapper[4783]: I1002 11:06:32.368010 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c4hstx" Oct 02 11:06:32 crc kubenswrapper[4783]: I1002 11:06:32.782124 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c4hstx"] Oct 02 11:06:32 crc kubenswrapper[4783]: I1002 11:06:32.964607 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c4hstx" event={"ID":"09df6785-86ca-4bd5-958d-931c38c75084","Type":"ContainerStarted","Data":"40bdf736bc67767462576efba38c42ccace4c6be7ca45de4aa43e24bfc34e01b"} Oct 02 11:06:32 crc kubenswrapper[4783]: I1002 11:06:32.965001 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c4hstx" event={"ID":"09df6785-86ca-4bd5-958d-931c38c75084","Type":"ContainerStarted","Data":"ca7b27fc6a3aa41ae537898467e60ec16838647bad9e8474082d39f046285238"} Oct 02 11:06:33 crc kubenswrapper[4783]: I1002 11:06:33.989326 4783 generic.go:334] "Generic (PLEG): container finished" podID="09df6785-86ca-4bd5-958d-931c38c75084" containerID="40bdf736bc67767462576efba38c42ccace4c6be7ca45de4aa43e24bfc34e01b" exitCode=0 Oct 02 11:06:33 crc kubenswrapper[4783]: I1002 11:06:33.989384 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c4hstx" event={"ID":"09df6785-86ca-4bd5-958d-931c38c75084","Type":"ContainerDied","Data":"40bdf736bc67767462576efba38c42ccace4c6be7ca45de4aa43e24bfc34e01b"} Oct 02 11:06:34 crc kubenswrapper[4783]: I1002 11:06:34.397021 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-qfq6b"] Oct 02 11:06:34 crc kubenswrapper[4783]: I1002 11:06:34.399349 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-qfq6b" Oct 02 11:06:34 crc kubenswrapper[4783]: I1002 11:06:34.412757 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-qfq6b"] Oct 02 11:06:34 crc kubenswrapper[4783]: I1002 11:06:34.524252 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/386dc688-2c2e-4a4c-859a-13280caf6b7b-utilities\") pod \"redhat-operators-qfq6b\" (UID: \"386dc688-2c2e-4a4c-859a-13280caf6b7b\") " pod="openshift-marketplace/redhat-operators-qfq6b" Oct 02 11:06:34 crc kubenswrapper[4783]: I1002 11:06:34.524341 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w5j79\" (UniqueName: \"kubernetes.io/projected/386dc688-2c2e-4a4c-859a-13280caf6b7b-kube-api-access-w5j79\") pod \"redhat-operators-qfq6b\" (UID: \"386dc688-2c2e-4a4c-859a-13280caf6b7b\") " pod="openshift-marketplace/redhat-operators-qfq6b" Oct 02 11:06:34 crc kubenswrapper[4783]: I1002 11:06:34.524398 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/386dc688-2c2e-4a4c-859a-13280caf6b7b-catalog-content\") pod \"redhat-operators-qfq6b\" (UID: \"386dc688-2c2e-4a4c-859a-13280caf6b7b\") " pod="openshift-marketplace/redhat-operators-qfq6b" Oct 02 11:06:34 crc kubenswrapper[4783]: I1002 11:06:34.625384 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/386dc688-2c2e-4a4c-859a-13280caf6b7b-catalog-content\") pod \"redhat-operators-qfq6b\" (UID: \"386dc688-2c2e-4a4c-859a-13280caf6b7b\") " pod="openshift-marketplace/redhat-operators-qfq6b" Oct 02 11:06:34 crc kubenswrapper[4783]: I1002 11:06:34.625478 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/386dc688-2c2e-4a4c-859a-13280caf6b7b-utilities\") pod \"redhat-operators-qfq6b\" (UID: \"386dc688-2c2e-4a4c-859a-13280caf6b7b\") " pod="openshift-marketplace/redhat-operators-qfq6b" Oct 02 11:06:34 crc kubenswrapper[4783]: I1002 11:06:34.625581 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w5j79\" (UniqueName: \"kubernetes.io/projected/386dc688-2c2e-4a4c-859a-13280caf6b7b-kube-api-access-w5j79\") pod \"redhat-operators-qfq6b\" (UID: \"386dc688-2c2e-4a4c-859a-13280caf6b7b\") " pod="openshift-marketplace/redhat-operators-qfq6b" Oct 02 11:06:34 crc kubenswrapper[4783]: I1002 11:06:34.625957 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/386dc688-2c2e-4a4c-859a-13280caf6b7b-utilities\") pod \"redhat-operators-qfq6b\" (UID: \"386dc688-2c2e-4a4c-859a-13280caf6b7b\") " pod="openshift-marketplace/redhat-operators-qfq6b" Oct 02 11:06:34 crc kubenswrapper[4783]: I1002 11:06:34.625977 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/386dc688-2c2e-4a4c-859a-13280caf6b7b-catalog-content\") pod \"redhat-operators-qfq6b\" (UID: \"386dc688-2c2e-4a4c-859a-13280caf6b7b\") " pod="openshift-marketplace/redhat-operators-qfq6b" Oct 02 11:06:34 crc kubenswrapper[4783]: I1002 11:06:34.645020 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
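Each volume above goes through the same three-step pattern: VerifyControllerAttachedVolume, then MountVolume started, then MountVolume.SetUp succeeded. A toy desired-state/actual-state reconcile loop in the same spirit; all types and names here are illustrative, not the kubelet volume manager's:

package main

import "fmt"

// volume is a toy stand-in for a pod volume spec.
type volume struct{ name, plugin string }

// reconcile mounts anything desired but not yet mounted and unmounts
// anything mounted but no longer desired, echoing the reconciler_common
// phrasing seen in this log.
func reconcile(desired map[string]volume, actual map[string]bool) {
	for name, v := range desired {
		if !actual[name] {
			fmt.Printf("MountVolume started for volume %q (plugin %s)\n", name, v.plugin)
			actual[name] = true // pretend SetUp succeeded
			fmt.Printf("MountVolume.SetUp succeeded for volume %q\n", name)
		}
	}
	for name := range actual {
		if _, ok := desired[name]; !ok {
			fmt.Printf("UnmountVolume started for volume %q\n", name)
			delete(actual, name)
		}
	}
}

func main() {
	desired := map[string]volume{
		"utilities":             {"utilities", "kubernetes.io/empty-dir"},
		"catalog-content":       {"catalog-content", "kubernetes.io/empty-dir"},
		"kube-api-access-w5j79": {"kube-api-access-w5j79", "kubernetes.io/projected"},
	}
	reconcile(desired, map[string]bool{})
}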
\"kube-api-access-w5j79\" (UniqueName: \"kubernetes.io/projected/386dc688-2c2e-4a4c-859a-13280caf6b7b-kube-api-access-w5j79\") pod \"redhat-operators-qfq6b\" (UID: \"386dc688-2c2e-4a4c-859a-13280caf6b7b\") " pod="openshift-marketplace/redhat-operators-qfq6b" Oct 02 11:06:34 crc kubenswrapper[4783]: I1002 11:06:34.730890 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qfq6b" Oct 02 11:06:35 crc kubenswrapper[4783]: I1002 11:06:35.124196 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-qfq6b"] Oct 02 11:06:35 crc kubenswrapper[4783]: W1002 11:06:35.128282 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod386dc688_2c2e_4a4c_859a_13280caf6b7b.slice/crio-1d6c0e93f7623eab3a3bdc8f61dc11c11e4c08a1a0f20d4f2af6d66a79d300e6 WatchSource:0}: Error finding container 1d6c0e93f7623eab3a3bdc8f61dc11c11e4c08a1a0f20d4f2af6d66a79d300e6: Status 404 returned error can't find the container with id 1d6c0e93f7623eab3a3bdc8f61dc11c11e4c08a1a0f20d4f2af6d66a79d300e6 Oct 02 11:06:36 crc kubenswrapper[4783]: I1002 11:06:36.004067 4783 generic.go:334] "Generic (PLEG): container finished" podID="09df6785-86ca-4bd5-958d-931c38c75084" containerID="35affda8038858da1f65c072a1e800d6390086f4ce6907eadef96e0c3d2aeaba" exitCode=0 Oct 02 11:06:36 crc kubenswrapper[4783]: I1002 11:06:36.004167 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c4hstx" event={"ID":"09df6785-86ca-4bd5-958d-931c38c75084","Type":"ContainerDied","Data":"35affda8038858da1f65c072a1e800d6390086f4ce6907eadef96e0c3d2aeaba"} Oct 02 11:06:36 crc kubenswrapper[4783]: I1002 11:06:36.007385 4783 generic.go:334] "Generic (PLEG): container finished" podID="386dc688-2c2e-4a4c-859a-13280caf6b7b" containerID="269bf56f5fdaf6cd9ef3b6646ee0ab4ceb4f5afc0ec73070c433cff2c645d018" exitCode=0 Oct 02 11:06:36 crc kubenswrapper[4783]: I1002 11:06:36.007493 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qfq6b" event={"ID":"386dc688-2c2e-4a4c-859a-13280caf6b7b","Type":"ContainerDied","Data":"269bf56f5fdaf6cd9ef3b6646ee0ab4ceb4f5afc0ec73070c433cff2c645d018"} Oct 02 11:06:36 crc kubenswrapper[4783]: I1002 11:06:36.007564 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qfq6b" event={"ID":"386dc688-2c2e-4a4c-859a-13280caf6b7b","Type":"ContainerStarted","Data":"1d6c0e93f7623eab3a3bdc8f61dc11c11e4c08a1a0f20d4f2af6d66a79d300e6"} Oct 02 11:06:37 crc kubenswrapper[4783]: I1002 11:06:37.018035 4783 generic.go:334] "Generic (PLEG): container finished" podID="09df6785-86ca-4bd5-958d-931c38c75084" containerID="9e13ba816a85d2b8eab41b346e72b3da0262979fd6fda96c48d33025d5891b27" exitCode=0 Oct 02 11:06:37 crc kubenswrapper[4783]: I1002 11:06:37.018152 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c4hstx" event={"ID":"09df6785-86ca-4bd5-958d-931c38c75084","Type":"ContainerDied","Data":"9e13ba816a85d2b8eab41b346e72b3da0262979fd6fda96c48d33025d5891b27"} Oct 02 11:06:38 crc kubenswrapper[4783]: I1002 11:06:38.024213 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qfq6b" 
event={"ID":"386dc688-2c2e-4a4c-859a-13280caf6b7b","Type":"ContainerStarted","Data":"e4122273f78ad7cc32cda5c87a87b292e863e20dceb04b62db667e12600ed1e6"} Oct 02 11:06:38 crc kubenswrapper[4783]: I1002 11:06:38.271105 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c4hstx" Oct 02 11:06:38 crc kubenswrapper[4783]: I1002 11:06:38.367800 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/09df6785-86ca-4bd5-958d-931c38c75084-bundle\") pod \"09df6785-86ca-4bd5-958d-931c38c75084\" (UID: \"09df6785-86ca-4bd5-958d-931c38c75084\") " Oct 02 11:06:38 crc kubenswrapper[4783]: I1002 11:06:38.367942 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7m9h9\" (UniqueName: \"kubernetes.io/projected/09df6785-86ca-4bd5-958d-931c38c75084-kube-api-access-7m9h9\") pod \"09df6785-86ca-4bd5-958d-931c38c75084\" (UID: \"09df6785-86ca-4bd5-958d-931c38c75084\") " Oct 02 11:06:38 crc kubenswrapper[4783]: I1002 11:06:38.368012 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/09df6785-86ca-4bd5-958d-931c38c75084-util\") pod \"09df6785-86ca-4bd5-958d-931c38c75084\" (UID: \"09df6785-86ca-4bd5-958d-931c38c75084\") " Oct 02 11:06:38 crc kubenswrapper[4783]: I1002 11:06:38.368951 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/09df6785-86ca-4bd5-958d-931c38c75084-bundle" (OuterVolumeSpecName: "bundle") pod "09df6785-86ca-4bd5-958d-931c38c75084" (UID: "09df6785-86ca-4bd5-958d-931c38c75084"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:06:38 crc kubenswrapper[4783]: I1002 11:06:38.374349 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09df6785-86ca-4bd5-958d-931c38c75084-kube-api-access-7m9h9" (OuterVolumeSpecName: "kube-api-access-7m9h9") pod "09df6785-86ca-4bd5-958d-931c38c75084" (UID: "09df6785-86ca-4bd5-958d-931c38c75084"). InnerVolumeSpecName "kube-api-access-7m9h9". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:06:38 crc kubenswrapper[4783]: I1002 11:06:38.469549 4783 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/09df6785-86ca-4bd5-958d-931c38c75084-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 11:06:38 crc kubenswrapper[4783]: I1002 11:06:38.469591 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7m9h9\" (UniqueName: \"kubernetes.io/projected/09df6785-86ca-4bd5-958d-931c38c75084-kube-api-access-7m9h9\") on node \"crc\" DevicePath \"\"" Oct 02 11:06:38 crc kubenswrapper[4783]: I1002 11:06:38.591235 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/09df6785-86ca-4bd5-958d-931c38c75084-util" (OuterVolumeSpecName: "util") pod "09df6785-86ca-4bd5-958d-931c38c75084" (UID: "09df6785-86ca-4bd5-958d-931c38c75084"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:06:38 crc kubenswrapper[4783]: I1002 11:06:38.672126 4783 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/09df6785-86ca-4bd5-958d-931c38c75084-util\") on node \"crc\" DevicePath \"\"" Oct 02 11:06:39 crc kubenswrapper[4783]: I1002 11:06:39.030713 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c4hstx" event={"ID":"09df6785-86ca-4bd5-958d-931c38c75084","Type":"ContainerDied","Data":"ca7b27fc6a3aa41ae537898467e60ec16838647bad9e8474082d39f046285238"} Oct 02 11:06:39 crc kubenswrapper[4783]: I1002 11:06:39.030759 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ca7b27fc6a3aa41ae537898467e60ec16838647bad9e8474082d39f046285238" Oct 02 11:06:39 crc kubenswrapper[4783]: I1002 11:06:39.030723 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c4hstx" Oct 02 11:06:39 crc kubenswrapper[4783]: I1002 11:06:39.032447 4783 generic.go:334] "Generic (PLEG): container finished" podID="386dc688-2c2e-4a4c-859a-13280caf6b7b" containerID="e4122273f78ad7cc32cda5c87a87b292e863e20dceb04b62db667e12600ed1e6" exitCode=0 Oct 02 11:06:39 crc kubenswrapper[4783]: I1002 11:06:39.032502 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qfq6b" event={"ID":"386dc688-2c2e-4a4c-859a-13280caf6b7b","Type":"ContainerDied","Data":"e4122273f78ad7cc32cda5c87a87b292e863e20dceb04b62db667e12600ed1e6"} Oct 02 11:06:40 crc kubenswrapper[4783]: I1002 11:06:40.043547 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qfq6b" event={"ID":"386dc688-2c2e-4a4c-859a-13280caf6b7b","Type":"ContainerStarted","Data":"f710cc6d69309b85d0a268a149a107b39f7eba2109ab574ee60268ec1f388c34"} Oct 02 11:06:40 crc kubenswrapper[4783]: I1002 11:06:40.066224 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-qfq6b" podStartSLOduration=2.574470197 podStartE2EDuration="6.066193335s" podCreationTimestamp="2025-10-02 11:06:34 +0000 UTC" firstStartedPulling="2025-10-02 11:06:36.008604613 +0000 UTC m=+829.324798874" lastFinishedPulling="2025-10-02 11:06:39.500327751 +0000 UTC m=+832.816522012" observedRunningTime="2025-10-02 11:06:40.064682034 +0000 UTC m=+833.380876315" watchObservedRunningTime="2025-10-02 11:06:40.066193335 +0000 UTC m=+833.382387636" Oct 02 11:06:42 crc kubenswrapper[4783]: I1002 11:06:42.524617 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-858ddd8f98-4wb7g"] Oct 02 11:06:42 crc kubenswrapper[4783]: E1002 11:06:42.524825 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09df6785-86ca-4bd5-958d-931c38c75084" containerName="pull" Oct 02 11:06:42 crc kubenswrapper[4783]: I1002 11:06:42.524837 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="09df6785-86ca-4bd5-958d-931c38c75084" containerName="pull" Oct 02 11:06:42 crc kubenswrapper[4783]: E1002 11:06:42.524856 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09df6785-86ca-4bd5-958d-931c38c75084" containerName="util" Oct 02 11:06:42 crc kubenswrapper[4783]: I1002 11:06:42.524861 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="09df6785-86ca-4bd5-958d-931c38c75084" 
containerName="util" Oct 02 11:06:42 crc kubenswrapper[4783]: E1002 11:06:42.524868 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09df6785-86ca-4bd5-958d-931c38c75084" containerName="extract" Oct 02 11:06:42 crc kubenswrapper[4783]: I1002 11:06:42.524874 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="09df6785-86ca-4bd5-958d-931c38c75084" containerName="extract" Oct 02 11:06:42 crc kubenswrapper[4783]: I1002 11:06:42.524961 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="09df6785-86ca-4bd5-958d-931c38c75084" containerName="extract" Oct 02 11:06:42 crc kubenswrapper[4783]: I1002 11:06:42.525355 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-operator-858ddd8f98-4wb7g" Oct 02 11:06:42 crc kubenswrapper[4783]: I1002 11:06:42.530250 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-55c97" Oct 02 11:06:42 crc kubenswrapper[4783]: I1002 11:06:42.530457 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Oct 02 11:06:42 crc kubenswrapper[4783]: I1002 11:06:42.530594 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Oct 02 11:06:42 crc kubenswrapper[4783]: I1002 11:06:42.543756 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-858ddd8f98-4wb7g"] Oct 02 11:06:42 crc kubenswrapper[4783]: I1002 11:06:42.622397 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9jnwr\" (UniqueName: \"kubernetes.io/projected/f6125a3c-e2a7-45aa-be54-af390730c09a-kube-api-access-9jnwr\") pod \"nmstate-operator-858ddd8f98-4wb7g\" (UID: \"f6125a3c-e2a7-45aa-be54-af390730c09a\") " pod="openshift-nmstate/nmstate-operator-858ddd8f98-4wb7g" Oct 02 11:06:42 crc kubenswrapper[4783]: I1002 11:06:42.723977 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9jnwr\" (UniqueName: \"kubernetes.io/projected/f6125a3c-e2a7-45aa-be54-af390730c09a-kube-api-access-9jnwr\") pod \"nmstate-operator-858ddd8f98-4wb7g\" (UID: \"f6125a3c-e2a7-45aa-be54-af390730c09a\") " pod="openshift-nmstate/nmstate-operator-858ddd8f98-4wb7g" Oct 02 11:06:42 crc kubenswrapper[4783]: I1002 11:06:42.766302 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9jnwr\" (UniqueName: \"kubernetes.io/projected/f6125a3c-e2a7-45aa-be54-af390730c09a-kube-api-access-9jnwr\") pod \"nmstate-operator-858ddd8f98-4wb7g\" (UID: \"f6125a3c-e2a7-45aa-be54-af390730c09a\") " pod="openshift-nmstate/nmstate-operator-858ddd8f98-4wb7g" Oct 02 11:06:42 crc kubenswrapper[4783]: I1002 11:06:42.842618 4783 util.go:30] "No sandbox for pod can be found. 
Oct 02 11:06:43 crc kubenswrapper[4783]: I1002 11:06:43.249632 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-858ddd8f98-4wb7g"]
Oct 02 11:06:44 crc kubenswrapper[4783]: I1002 11:06:44.066978 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-858ddd8f98-4wb7g" event={"ID":"f6125a3c-e2a7-45aa-be54-af390730c09a","Type":"ContainerStarted","Data":"c9c19a6c985f4765d7a012c9915de709063535a2c293e0c143d9e517f408acec"}
Oct 02 11:06:44 crc kubenswrapper[4783]: I1002 11:06:44.731314 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-qfq6b"
Oct 02 11:06:44 crc kubenswrapper[4783]: I1002 11:06:44.731364 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-qfq6b"
Oct 02 11:06:44 crc kubenswrapper[4783]: I1002 11:06:44.769740 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-qfq6b"
Oct 02 11:06:45 crc kubenswrapper[4783]: I1002 11:06:45.102818 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-qfq6b"
Oct 02 11:06:47 crc kubenswrapper[4783]: I1002 11:06:47.181101 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-qfq6b"]
Oct 02 11:06:47 crc kubenswrapper[4783]: I1002 11:06:47.181762 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-qfq6b" podUID="386dc688-2c2e-4a4c-859a-13280caf6b7b" containerName="registry-server" containerID="cri-o://f710cc6d69309b85d0a268a149a107b39f7eba2109ab574ee60268ec1f388c34" gracePeriod=2
Oct 02 11:06:48 crc kubenswrapper[4783]: I1002 11:06:48.088460 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qfq6b" event={"ID":"386dc688-2c2e-4a4c-859a-13280caf6b7b","Type":"ContainerDied","Data":"f710cc6d69309b85d0a268a149a107b39f7eba2109ab574ee60268ec1f388c34"}
Oct 02 11:06:48 crc kubenswrapper[4783]: I1002 11:06:48.088193 4783 generic.go:334] "Generic (PLEG): container finished" podID="386dc688-2c2e-4a4c-859a-13280caf6b7b" containerID="f710cc6d69309b85d0a268a149a107b39f7eba2109ab574ee60268ec1f388c34" exitCode=0
Oct 02 11:06:48 crc kubenswrapper[4783]: I1002 11:06:48.820700 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qfq6b"
Oct 02 11:06:48 crc kubenswrapper[4783]: I1002 11:06:48.913558 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/386dc688-2c2e-4a4c-859a-13280caf6b7b-utilities\") pod \"386dc688-2c2e-4a4c-859a-13280caf6b7b\" (UID: \"386dc688-2c2e-4a4c-859a-13280caf6b7b\") "
Oct 02 11:06:48 crc kubenswrapper[4783]: I1002 11:06:48.913968 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/386dc688-2c2e-4a4c-859a-13280caf6b7b-catalog-content\") pod \"386dc688-2c2e-4a4c-859a-13280caf6b7b\" (UID: \"386dc688-2c2e-4a4c-859a-13280caf6b7b\") "
Oct 02 11:06:48 crc kubenswrapper[4783]: I1002 11:06:48.914079 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w5j79\" (UniqueName: \"kubernetes.io/projected/386dc688-2c2e-4a4c-859a-13280caf6b7b-kube-api-access-w5j79\") pod \"386dc688-2c2e-4a4c-859a-13280caf6b7b\" (UID: \"386dc688-2c2e-4a4c-859a-13280caf6b7b\") "
Oct 02 11:06:48 crc kubenswrapper[4783]: I1002 11:06:48.914447 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/386dc688-2c2e-4a4c-859a-13280caf6b7b-utilities" (OuterVolumeSpecName: "utilities") pod "386dc688-2c2e-4a4c-859a-13280caf6b7b" (UID: "386dc688-2c2e-4a4c-859a-13280caf6b7b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 02 11:06:48 crc kubenswrapper[4783]: I1002 11:06:48.914799 4783 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/386dc688-2c2e-4a4c-859a-13280caf6b7b-utilities\") on node \"crc\" DevicePath \"\""
Oct 02 11:06:48 crc kubenswrapper[4783]: I1002 11:06:48.921347 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/386dc688-2c2e-4a4c-859a-13280caf6b7b-kube-api-access-w5j79" (OuterVolumeSpecName: "kube-api-access-w5j79") pod "386dc688-2c2e-4a4c-859a-13280caf6b7b" (UID: "386dc688-2c2e-4a4c-859a-13280caf6b7b"). InnerVolumeSpecName "kube-api-access-w5j79". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 02 11:06:49 crc kubenswrapper[4783]: I1002 11:06:49.003123 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/386dc688-2c2e-4a4c-859a-13280caf6b7b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "386dc688-2c2e-4a4c-859a-13280caf6b7b" (UID: "386dc688-2c2e-4a4c-859a-13280caf6b7b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
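The "Killing container with a grace period" entry above (gracePeriod=2) reflects the usual termination escalation: the runtime signals the container to stop, waits up to the grace period, then force-kills it. A standalone sketch of that pattern for an arbitrary child process on Linux; this is not CRI-O's implementation:

package main

import (
	"fmt"
	"os/exec"
	"syscall"
	"time"
)

// killWithGrace sends SIGTERM, waits up to grace, then escalates to SIGKILL,
// mirroring the gracePeriod=2 behaviour logged above.
func killWithGrace(cmd *exec.Cmd, grace time.Duration) {
	_ = cmd.Process.Signal(syscall.SIGTERM)
	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()
	select {
	case <-done:
		fmt.Println("exited within grace period")
	case <-time.After(grace):
		_ = cmd.Process.Kill() // SIGKILL
		<-done
		fmt.Println("killed after grace period")
	}
}

func main() {
	cmd := exec.Command("sleep", "60")
	if err := cmd.Start(); err != nil {
		panic(err)
	}
	killWithGrace(cmd, 2*time.Second)
}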
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:06:49 crc kubenswrapper[4783]: I1002 11:06:49.016283 4783 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/386dc688-2c2e-4a4c-859a-13280caf6b7b-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 02 11:06:49 crc kubenswrapper[4783]: I1002 11:06:49.016310 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w5j79\" (UniqueName: \"kubernetes.io/projected/386dc688-2c2e-4a4c-859a-13280caf6b7b-kube-api-access-w5j79\") on node \"crc\" DevicePath \"\"" Oct 02 11:06:49 crc kubenswrapper[4783]: I1002 11:06:49.096100 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qfq6b" event={"ID":"386dc688-2c2e-4a4c-859a-13280caf6b7b","Type":"ContainerDied","Data":"1d6c0e93f7623eab3a3bdc8f61dc11c11e4c08a1a0f20d4f2af6d66a79d300e6"} Oct 02 11:06:49 crc kubenswrapper[4783]: I1002 11:06:49.096173 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qfq6b" Oct 02 11:06:49 crc kubenswrapper[4783]: I1002 11:06:49.096181 4783 scope.go:117] "RemoveContainer" containerID="f710cc6d69309b85d0a268a149a107b39f7eba2109ab574ee60268ec1f388c34" Oct 02 11:06:49 crc kubenswrapper[4783]: I1002 11:06:49.117152 4783 scope.go:117] "RemoveContainer" containerID="e4122273f78ad7cc32cda5c87a87b292e863e20dceb04b62db667e12600ed1e6" Oct 02 11:06:49 crc kubenswrapper[4783]: I1002 11:06:49.128677 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-qfq6b"] Oct 02 11:06:49 crc kubenswrapper[4783]: I1002 11:06:49.133253 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-qfq6b"] Oct 02 11:06:49 crc kubenswrapper[4783]: I1002 11:06:49.137340 4783 scope.go:117] "RemoveContainer" containerID="269bf56f5fdaf6cd9ef3b6646ee0ab4ceb4f5afc0ec73070c433cff2c645d018" Oct 02 11:06:49 crc kubenswrapper[4783]: I1002 11:06:49.557516 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="386dc688-2c2e-4a4c-859a-13280caf6b7b" path="/var/lib/kubelet/pods/386dc688-2c2e-4a4c-859a-13280caf6b7b/volumes" Oct 02 11:06:50 crc kubenswrapper[4783]: I1002 11:06:50.109900 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-858ddd8f98-4wb7g" event={"ID":"f6125a3c-e2a7-45aa-be54-af390730c09a","Type":"ContainerStarted","Data":"a3aafea629a2fa198ab89593c33fd004bd37ac6cf8ff3d5a1e56ded597c4a9b2"} Oct 02 11:06:50 crc kubenswrapper[4783]: I1002 11:06:50.138399 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-858ddd8f98-4wb7g" podStartSLOduration=2.535951795 podStartE2EDuration="8.13837701s" podCreationTimestamp="2025-10-02 11:06:42 +0000 UTC" firstStartedPulling="2025-10-02 11:06:43.26286008 +0000 UTC m=+836.579054341" lastFinishedPulling="2025-10-02 11:06:48.865285245 +0000 UTC m=+842.181479556" observedRunningTime="2025-10-02 11:06:50.137842086 +0000 UTC m=+843.454036387" watchObservedRunningTime="2025-10-02 11:06:50.13837701 +0000 UTC m=+843.454571311" Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.530706 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-fdff9cb8d-m8ljg"] Oct 02 11:06:52 crc kubenswrapper[4783]: E1002 11:06:52.531135 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="386dc688-2c2e-4a4c-859a-13280caf6b7b" 
containerName="extract-utilities" Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.531150 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="386dc688-2c2e-4a4c-859a-13280caf6b7b" containerName="extract-utilities" Oct 02 11:06:52 crc kubenswrapper[4783]: E1002 11:06:52.531164 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="386dc688-2c2e-4a4c-859a-13280caf6b7b" containerName="registry-server" Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.531171 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="386dc688-2c2e-4a4c-859a-13280caf6b7b" containerName="registry-server" Oct 02 11:06:52 crc kubenswrapper[4783]: E1002 11:06:52.531182 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="386dc688-2c2e-4a4c-859a-13280caf6b7b" containerName="extract-content" Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.531189 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="386dc688-2c2e-4a4c-859a-13280caf6b7b" containerName="extract-content" Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.531285 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="386dc688-2c2e-4a4c-859a-13280caf6b7b" containerName="registry-server" Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.531821 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-fdff9cb8d-m8ljg" Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.534701 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-4ldnh" Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.558425 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-6cdbc54649-f8h72"] Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.559271 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-6cdbc54649-f8h72" Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.562660 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-fdff9cb8d-m8ljg"] Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.563568 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.594958 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-9kdkg"] Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.595813 4783 util.go:30] "No sandbox for pod can be found. 
Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.640321 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6cdbc54649-f8h72"]
Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.663719 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/d95fff49-fc1c-4c9c-af21-f6058ede50d5-dbus-socket\") pod \"nmstate-handler-9kdkg\" (UID: \"d95fff49-fc1c-4c9c-af21-f6058ede50d5\") " pod="openshift-nmstate/nmstate-handler-9kdkg"
Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.663820 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/d95fff49-fc1c-4c9c-af21-f6058ede50d5-nmstate-lock\") pod \"nmstate-handler-9kdkg\" (UID: \"d95fff49-fc1c-4c9c-af21-f6058ede50d5\") " pod="openshift-nmstate/nmstate-handler-9kdkg"
Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.663854 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sjn6k\" (UniqueName: \"kubernetes.io/projected/dd90f287-398b-473a-b421-57a85abdca9a-kube-api-access-sjn6k\") pod \"nmstate-metrics-fdff9cb8d-m8ljg\" (UID: \"dd90f287-398b-473a-b421-57a85abdca9a\") " pod="openshift-nmstate/nmstate-metrics-fdff9cb8d-m8ljg"
Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.663872 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dqqfx\" (UniqueName: \"kubernetes.io/projected/d95fff49-fc1c-4c9c-af21-f6058ede50d5-kube-api-access-dqqfx\") pod \"nmstate-handler-9kdkg\" (UID: \"d95fff49-fc1c-4c9c-af21-f6058ede50d5\") " pod="openshift-nmstate/nmstate-handler-9kdkg"
Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.663903 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/d95fff49-fc1c-4c9c-af21-f6058ede50d5-ovs-socket\") pod \"nmstate-handler-9kdkg\" (UID: \"d95fff49-fc1c-4c9c-af21-f6058ede50d5\") " pod="openshift-nmstate/nmstate-handler-9kdkg"
Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.664017 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ldc5k\" (UniqueName: \"kubernetes.io/projected/051f4c7b-ead6-4bd5-89d4-b64cf47f24d5-kube-api-access-ldc5k\") pod \"nmstate-webhook-6cdbc54649-f8h72\" (UID: \"051f4c7b-ead6-4bd5-89d4-b64cf47f24d5\") " pod="openshift-nmstate/nmstate-webhook-6cdbc54649-f8h72"
Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.664088 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/051f4c7b-ead6-4bd5-89d4-b64cf47f24d5-tls-key-pair\") pod \"nmstate-webhook-6cdbc54649-f8h72\" (UID: \"051f4c7b-ead6-4bd5-89d4-b64cf47f24d5\") " pod="openshift-nmstate/nmstate-webhook-6cdbc54649-f8h72"
Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.691831 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-6b874cbd85-5b9kv"]
Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.692447 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-5b9kv"
Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.697072 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-5klp4"
Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.697330 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert"
Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.697336 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf"
Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.704991 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-6b874cbd85-5b9kv"]
Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.765600 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sjn6k\" (UniqueName: \"kubernetes.io/projected/dd90f287-398b-473a-b421-57a85abdca9a-kube-api-access-sjn6k\") pod \"nmstate-metrics-fdff9cb8d-m8ljg\" (UID: \"dd90f287-398b-473a-b421-57a85abdca9a\") " pod="openshift-nmstate/nmstate-metrics-fdff9cb8d-m8ljg"
Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.765889 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dqqfx\" (UniqueName: \"kubernetes.io/projected/d95fff49-fc1c-4c9c-af21-f6058ede50d5-kube-api-access-dqqfx\") pod \"nmstate-handler-9kdkg\" (UID: \"d95fff49-fc1c-4c9c-af21-f6058ede50d5\") " pod="openshift-nmstate/nmstate-handler-9kdkg"
Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.765947 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/d206e83f-c7d7-4a26-961b-7649d60646a0-nginx-conf\") pod \"nmstate-console-plugin-6b874cbd85-5b9kv\" (UID: \"d206e83f-c7d7-4a26-961b-7649d60646a0\") " pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-5b9kv"
Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.765983 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/d95fff49-fc1c-4c9c-af21-f6058ede50d5-ovs-socket\") pod \"nmstate-handler-9kdkg\" (UID: \"d95fff49-fc1c-4c9c-af21-f6058ede50d5\") " pod="openshift-nmstate/nmstate-handler-9kdkg"
Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.766039 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/051f4c7b-ead6-4bd5-89d4-b64cf47f24d5-tls-key-pair\") pod \"nmstate-webhook-6cdbc54649-f8h72\" (UID: \"051f4c7b-ead6-4bd5-89d4-b64cf47f24d5\") " pod="openshift-nmstate/nmstate-webhook-6cdbc54649-f8h72"
Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.766064 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ldc5k\" (UniqueName: \"kubernetes.io/projected/051f4c7b-ead6-4bd5-89d4-b64cf47f24d5-kube-api-access-ldc5k\") pod \"nmstate-webhook-6cdbc54649-f8h72\" (UID: \"051f4c7b-ead6-4bd5-89d4-b64cf47f24d5\") " pod="openshift-nmstate/nmstate-webhook-6cdbc54649-f8h72"
Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.766112 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/d206e83f-c7d7-4a26-961b-7649d60646a0-plugin-serving-cert\") pod \"nmstate-console-plugin-6b874cbd85-5b9kv\" (UID: \"d206e83f-c7d7-4a26-961b-7649d60646a0\") " pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-5b9kv"
(UID: \"d206e83f-c7d7-4a26-961b-7649d60646a0\") " pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-5b9kv" Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.766136 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/d95fff49-fc1c-4c9c-af21-f6058ede50d5-dbus-socket\") pod \"nmstate-handler-9kdkg\" (UID: \"d95fff49-fc1c-4c9c-af21-f6058ede50d5\") " pod="openshift-nmstate/nmstate-handler-9kdkg" Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.766214 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mzrjp\" (UniqueName: \"kubernetes.io/projected/d206e83f-c7d7-4a26-961b-7649d60646a0-kube-api-access-mzrjp\") pod \"nmstate-console-plugin-6b874cbd85-5b9kv\" (UID: \"d206e83f-c7d7-4a26-961b-7649d60646a0\") " pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-5b9kv" Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.766308 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/d95fff49-fc1c-4c9c-af21-f6058ede50d5-ovs-socket\") pod \"nmstate-handler-9kdkg\" (UID: \"d95fff49-fc1c-4c9c-af21-f6058ede50d5\") " pod="openshift-nmstate/nmstate-handler-9kdkg" Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.766383 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/d95fff49-fc1c-4c9c-af21-f6058ede50d5-nmstate-lock\") pod \"nmstate-handler-9kdkg\" (UID: \"d95fff49-fc1c-4c9c-af21-f6058ede50d5\") " pod="openshift-nmstate/nmstate-handler-9kdkg" Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.766443 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/d95fff49-fc1c-4c9c-af21-f6058ede50d5-nmstate-lock\") pod \"nmstate-handler-9kdkg\" (UID: \"d95fff49-fc1c-4c9c-af21-f6058ede50d5\") " pod="openshift-nmstate/nmstate-handler-9kdkg" Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.766505 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/d95fff49-fc1c-4c9c-af21-f6058ede50d5-dbus-socket\") pod \"nmstate-handler-9kdkg\" (UID: \"d95fff49-fc1c-4c9c-af21-f6058ede50d5\") " pod="openshift-nmstate/nmstate-handler-9kdkg" Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.784813 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ldc5k\" (UniqueName: \"kubernetes.io/projected/051f4c7b-ead6-4bd5-89d4-b64cf47f24d5-kube-api-access-ldc5k\") pod \"nmstate-webhook-6cdbc54649-f8h72\" (UID: \"051f4c7b-ead6-4bd5-89d4-b64cf47f24d5\") " pod="openshift-nmstate/nmstate-webhook-6cdbc54649-f8h72" Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.786990 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/051f4c7b-ead6-4bd5-89d4-b64cf47f24d5-tls-key-pair\") pod \"nmstate-webhook-6cdbc54649-f8h72\" (UID: \"051f4c7b-ead6-4bd5-89d4-b64cf47f24d5\") " pod="openshift-nmstate/nmstate-webhook-6cdbc54649-f8h72" Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.793112 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sjn6k\" (UniqueName: \"kubernetes.io/projected/dd90f287-398b-473a-b421-57a85abdca9a-kube-api-access-sjn6k\") pod \"nmstate-metrics-fdff9cb8d-m8ljg\" (UID: 
\"dd90f287-398b-473a-b421-57a85abdca9a\") " pod="openshift-nmstate/nmstate-metrics-fdff9cb8d-m8ljg" Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.794778 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dqqfx\" (UniqueName: \"kubernetes.io/projected/d95fff49-fc1c-4c9c-af21-f6058ede50d5-kube-api-access-dqqfx\") pod \"nmstate-handler-9kdkg\" (UID: \"d95fff49-fc1c-4c9c-af21-f6058ede50d5\") " pod="openshift-nmstate/nmstate-handler-9kdkg" Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.852553 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-metrics-fdff9cb8d-m8ljg" Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.868149 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/d206e83f-c7d7-4a26-961b-7649d60646a0-nginx-conf\") pod \"nmstate-console-plugin-6b874cbd85-5b9kv\" (UID: \"d206e83f-c7d7-4a26-961b-7649d60646a0\") " pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-5b9kv" Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.868243 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/d206e83f-c7d7-4a26-961b-7649d60646a0-plugin-serving-cert\") pod \"nmstate-console-plugin-6b874cbd85-5b9kv\" (UID: \"d206e83f-c7d7-4a26-961b-7649d60646a0\") " pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-5b9kv" Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.868305 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mzrjp\" (UniqueName: \"kubernetes.io/projected/d206e83f-c7d7-4a26-961b-7649d60646a0-kube-api-access-mzrjp\") pod \"nmstate-console-plugin-6b874cbd85-5b9kv\" (UID: \"d206e83f-c7d7-4a26-961b-7649d60646a0\") " pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-5b9kv" Oct 02 11:06:52 crc kubenswrapper[4783]: E1002 11:06:52.868517 4783 secret.go:188] Couldn't get secret openshift-nmstate/plugin-serving-cert: secret "plugin-serving-cert" not found Oct 02 11:06:52 crc kubenswrapper[4783]: E1002 11:06:52.868591 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/d206e83f-c7d7-4a26-961b-7649d60646a0-plugin-serving-cert podName:d206e83f-c7d7-4a26-961b-7649d60646a0 nodeName:}" failed. No retries permitted until 2025-10-02 11:06:53.368570845 +0000 UTC m=+846.684765106 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "plugin-serving-cert" (UniqueName: "kubernetes.io/secret/d206e83f-c7d7-4a26-961b-7649d60646a0-plugin-serving-cert") pod "nmstate-console-plugin-6b874cbd85-5b9kv" (UID: "d206e83f-c7d7-4a26-961b-7649d60646a0") : secret "plugin-serving-cert" not found Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.869006 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/d206e83f-c7d7-4a26-961b-7649d60646a0-nginx-conf\") pod \"nmstate-console-plugin-6b874cbd85-5b9kv\" (UID: \"d206e83f-c7d7-4a26-961b-7649d60646a0\") " pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-5b9kv" Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.875666 4783 util.go:30] "No sandbox for pod can be found. 
Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.899484 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mzrjp\" (UniqueName: \"kubernetes.io/projected/d206e83f-c7d7-4a26-961b-7649d60646a0-kube-api-access-mzrjp\") pod \"nmstate-console-plugin-6b874cbd85-5b9kv\" (UID: \"d206e83f-c7d7-4a26-961b-7649d60646a0\") " pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-5b9kv"
Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.915893 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-9kdkg"
Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.931857 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-5f5d66fd8f-66gkw"]
Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.932754 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-5f5d66fd8f-66gkw"
Oct 02 11:06:52 crc kubenswrapper[4783]: I1002 11:06:52.942254 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-5f5d66fd8f-66gkw"]
Oct 02 11:06:53 crc kubenswrapper[4783]: I1002 11:06:53.070710 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/15483b1b-86dd-4e47-ae95-9a6afa7cba44-console-serving-cert\") pod \"console-5f5d66fd8f-66gkw\" (UID: \"15483b1b-86dd-4e47-ae95-9a6afa7cba44\") " pod="openshift-console/console-5f5d66fd8f-66gkw"
Oct 02 11:06:53 crc kubenswrapper[4783]: I1002 11:06:53.071030 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rp9vm\" (UniqueName: \"kubernetes.io/projected/15483b1b-86dd-4e47-ae95-9a6afa7cba44-kube-api-access-rp9vm\") pod \"console-5f5d66fd8f-66gkw\" (UID: \"15483b1b-86dd-4e47-ae95-9a6afa7cba44\") " pod="openshift-console/console-5f5d66fd8f-66gkw"
Oct 02 11:06:53 crc kubenswrapper[4783]: I1002 11:06:53.071056 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/15483b1b-86dd-4e47-ae95-9a6afa7cba44-console-oauth-config\") pod \"console-5f5d66fd8f-66gkw\" (UID: \"15483b1b-86dd-4e47-ae95-9a6afa7cba44\") " pod="openshift-console/console-5f5d66fd8f-66gkw"
Oct 02 11:06:53 crc kubenswrapper[4783]: I1002 11:06:53.071126 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/15483b1b-86dd-4e47-ae95-9a6afa7cba44-service-ca\") pod \"console-5f5d66fd8f-66gkw\" (UID: \"15483b1b-86dd-4e47-ae95-9a6afa7cba44\") " pod="openshift-console/console-5f5d66fd8f-66gkw"
Oct 02 11:06:53 crc kubenswrapper[4783]: I1002 11:06:53.071158 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/15483b1b-86dd-4e47-ae95-9a6afa7cba44-oauth-serving-cert\") pod \"console-5f5d66fd8f-66gkw\" (UID: \"15483b1b-86dd-4e47-ae95-9a6afa7cba44\") " pod="openshift-console/console-5f5d66fd8f-66gkw"
Oct 02 11:06:53 crc kubenswrapper[4783]: I1002 11:06:53.071200 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/15483b1b-86dd-4e47-ae95-9a6afa7cba44-trusted-ca-bundle\") pod \"console-5f5d66fd8f-66gkw\" (UID: \"15483b1b-86dd-4e47-ae95-9a6afa7cba44\") " pod="openshift-console/console-5f5d66fd8f-66gkw"
Oct 02 11:06:53 crc kubenswrapper[4783]: I1002 11:06:53.071230 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/15483b1b-86dd-4e47-ae95-9a6afa7cba44-console-config\") pod \"console-5f5d66fd8f-66gkw\" (UID: \"15483b1b-86dd-4e47-ae95-9a6afa7cba44\") " pod="openshift-console/console-5f5d66fd8f-66gkw"
Oct 02 11:06:53 crc kubenswrapper[4783]: I1002 11:06:53.129869 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-9kdkg" event={"ID":"d95fff49-fc1c-4c9c-af21-f6058ede50d5","Type":"ContainerStarted","Data":"65a5b043017d7fcd3fa2bd1aa8b96f43ce223785bd458ae10dc960035c1a923e"}
Oct 02 11:06:53 crc kubenswrapper[4783]: I1002 11:06:53.172610 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/15483b1b-86dd-4e47-ae95-9a6afa7cba44-trusted-ca-bundle\") pod \"console-5f5d66fd8f-66gkw\" (UID: \"15483b1b-86dd-4e47-ae95-9a6afa7cba44\") " pod="openshift-console/console-5f5d66fd8f-66gkw"
Oct 02 11:06:53 crc kubenswrapper[4783]: I1002 11:06:53.172663 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/15483b1b-86dd-4e47-ae95-9a6afa7cba44-console-config\") pod \"console-5f5d66fd8f-66gkw\" (UID: \"15483b1b-86dd-4e47-ae95-9a6afa7cba44\") " pod="openshift-console/console-5f5d66fd8f-66gkw"
Oct 02 11:06:53 crc kubenswrapper[4783]: I1002 11:06:53.172715 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/15483b1b-86dd-4e47-ae95-9a6afa7cba44-console-serving-cert\") pod \"console-5f5d66fd8f-66gkw\" (UID: \"15483b1b-86dd-4e47-ae95-9a6afa7cba44\") " pod="openshift-console/console-5f5d66fd8f-66gkw"
Oct 02 11:06:53 crc kubenswrapper[4783]: I1002 11:06:53.172754 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rp9vm\" (UniqueName: \"kubernetes.io/projected/15483b1b-86dd-4e47-ae95-9a6afa7cba44-kube-api-access-rp9vm\") pod \"console-5f5d66fd8f-66gkw\" (UID: \"15483b1b-86dd-4e47-ae95-9a6afa7cba44\") " pod="openshift-console/console-5f5d66fd8f-66gkw"
Oct 02 11:06:53 crc kubenswrapper[4783]: I1002 11:06:53.172776 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/15483b1b-86dd-4e47-ae95-9a6afa7cba44-console-oauth-config\") pod \"console-5f5d66fd8f-66gkw\" (UID: \"15483b1b-86dd-4e47-ae95-9a6afa7cba44\") " pod="openshift-console/console-5f5d66fd8f-66gkw"
Oct 02 11:06:53 crc kubenswrapper[4783]: I1002 11:06:53.172833 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/15483b1b-86dd-4e47-ae95-9a6afa7cba44-service-ca\") pod \"console-5f5d66fd8f-66gkw\" (UID: \"15483b1b-86dd-4e47-ae95-9a6afa7cba44\") " pod="openshift-console/console-5f5d66fd8f-66gkw"
Oct 02 11:06:53 crc kubenswrapper[4783]: I1002 11:06:53.172858 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/15483b1b-86dd-4e47-ae95-9a6afa7cba44-oauth-serving-cert\") pod \"console-5f5d66fd8f-66gkw\" (UID: \"15483b1b-86dd-4e47-ae95-9a6afa7cba44\") " pod="openshift-console/console-5f5d66fd8f-66gkw"
\"kubernetes.io/configmap/15483b1b-86dd-4e47-ae95-9a6afa7cba44-oauth-serving-cert\") pod \"console-5f5d66fd8f-66gkw\" (UID: \"15483b1b-86dd-4e47-ae95-9a6afa7cba44\") " pod="openshift-console/console-5f5d66fd8f-66gkw" Oct 02 11:06:53 crc kubenswrapper[4783]: I1002 11:06:53.173835 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/15483b1b-86dd-4e47-ae95-9a6afa7cba44-oauth-serving-cert\") pod \"console-5f5d66fd8f-66gkw\" (UID: \"15483b1b-86dd-4e47-ae95-9a6afa7cba44\") " pod="openshift-console/console-5f5d66fd8f-66gkw" Oct 02 11:06:53 crc kubenswrapper[4783]: I1002 11:06:53.174807 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/15483b1b-86dd-4e47-ae95-9a6afa7cba44-trusted-ca-bundle\") pod \"console-5f5d66fd8f-66gkw\" (UID: \"15483b1b-86dd-4e47-ae95-9a6afa7cba44\") " pod="openshift-console/console-5f5d66fd8f-66gkw" Oct 02 11:06:53 crc kubenswrapper[4783]: I1002 11:06:53.175185 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/15483b1b-86dd-4e47-ae95-9a6afa7cba44-console-config\") pod \"console-5f5d66fd8f-66gkw\" (UID: \"15483b1b-86dd-4e47-ae95-9a6afa7cba44\") " pod="openshift-console/console-5f5d66fd8f-66gkw" Oct 02 11:06:53 crc kubenswrapper[4783]: I1002 11:06:53.178877 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/15483b1b-86dd-4e47-ae95-9a6afa7cba44-console-oauth-config\") pod \"console-5f5d66fd8f-66gkw\" (UID: \"15483b1b-86dd-4e47-ae95-9a6afa7cba44\") " pod="openshift-console/console-5f5d66fd8f-66gkw" Oct 02 11:06:53 crc kubenswrapper[4783]: I1002 11:06:53.179009 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/15483b1b-86dd-4e47-ae95-9a6afa7cba44-service-ca\") pod \"console-5f5d66fd8f-66gkw\" (UID: \"15483b1b-86dd-4e47-ae95-9a6afa7cba44\") " pod="openshift-console/console-5f5d66fd8f-66gkw" Oct 02 11:06:53 crc kubenswrapper[4783]: I1002 11:06:53.179693 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/15483b1b-86dd-4e47-ae95-9a6afa7cba44-console-serving-cert\") pod \"console-5f5d66fd8f-66gkw\" (UID: \"15483b1b-86dd-4e47-ae95-9a6afa7cba44\") " pod="openshift-console/console-5f5d66fd8f-66gkw" Oct 02 11:06:53 crc kubenswrapper[4783]: I1002 11:06:53.192085 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rp9vm\" (UniqueName: \"kubernetes.io/projected/15483b1b-86dd-4e47-ae95-9a6afa7cba44-kube-api-access-rp9vm\") pod \"console-5f5d66fd8f-66gkw\" (UID: \"15483b1b-86dd-4e47-ae95-9a6afa7cba44\") " pod="openshift-console/console-5f5d66fd8f-66gkw" Oct 02 11:06:53 crc kubenswrapper[4783]: I1002 11:06:53.249533 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-5f5d66fd8f-66gkw" Oct 02 11:06:53 crc kubenswrapper[4783]: I1002 11:06:53.361198 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-fdff9cb8d-m8ljg"] Oct 02 11:06:53 crc kubenswrapper[4783]: I1002 11:06:53.375020 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/d206e83f-c7d7-4a26-961b-7649d60646a0-plugin-serving-cert\") pod \"nmstate-console-plugin-6b874cbd85-5b9kv\" (UID: \"d206e83f-c7d7-4a26-961b-7649d60646a0\") " pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-5b9kv" Oct 02 11:06:53 crc kubenswrapper[4783]: I1002 11:06:53.381018 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/d206e83f-c7d7-4a26-961b-7649d60646a0-plugin-serving-cert\") pod \"nmstate-console-plugin-6b874cbd85-5b9kv\" (UID: \"d206e83f-c7d7-4a26-961b-7649d60646a0\") " pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-5b9kv" Oct 02 11:06:53 crc kubenswrapper[4783]: I1002 11:06:53.412509 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-6cdbc54649-f8h72"] Oct 02 11:06:53 crc kubenswrapper[4783]: I1002 11:06:53.429525 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-5f5d66fd8f-66gkw"] Oct 02 11:06:53 crc kubenswrapper[4783]: W1002 11:06:53.437844 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod15483b1b_86dd_4e47_ae95_9a6afa7cba44.slice/crio-d46727b7b951ddfd9e0dbcaa0c2b29133e3e71f87780b49c7d663c8708be5963 WatchSource:0}: Error finding container d46727b7b951ddfd9e0dbcaa0c2b29133e3e71f87780b49c7d663c8708be5963: Status 404 returned error can't find the container with id d46727b7b951ddfd9e0dbcaa0c2b29133e3e71f87780b49c7d663c8708be5963 Oct 02 11:06:53 crc kubenswrapper[4783]: I1002 11:06:53.608250 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-5b9kv" Oct 02 11:06:54 crc kubenswrapper[4783]: I1002 11:06:54.016292 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-6b874cbd85-5b9kv"] Oct 02 11:06:54 crc kubenswrapper[4783]: W1002 11:06:54.026229 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd206e83f_c7d7_4a26_961b_7649d60646a0.slice/crio-b98e8d7b74161a40358a62934be5e6ca35ba57bdb2390abdf748ce7c2f46936c WatchSource:0}: Error finding container b98e8d7b74161a40358a62934be5e6ca35ba57bdb2390abdf748ce7c2f46936c: Status 404 returned error can't find the container with id b98e8d7b74161a40358a62934be5e6ca35ba57bdb2390abdf748ce7c2f46936c Oct 02 11:06:54 crc kubenswrapper[4783]: I1002 11:06:54.136172 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6cdbc54649-f8h72" event={"ID":"051f4c7b-ead6-4bd5-89d4-b64cf47f24d5","Type":"ContainerStarted","Data":"0dd532f58944f469137f94934365d21e03a8512c3c7c6bb1a2cae07413ab9297"} Oct 02 11:06:54 crc kubenswrapper[4783]: I1002 11:06:54.137490 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-fdff9cb8d-m8ljg" event={"ID":"dd90f287-398b-473a-b421-57a85abdca9a","Type":"ContainerStarted","Data":"3fbe400de40e0124b5479c488a7f615cd7481949fb303817cb6beea8bb972b73"} Oct 02 11:06:54 crc kubenswrapper[4783]: I1002 11:06:54.138707 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-5b9kv" event={"ID":"d206e83f-c7d7-4a26-961b-7649d60646a0","Type":"ContainerStarted","Data":"b98e8d7b74161a40358a62934be5e6ca35ba57bdb2390abdf748ce7c2f46936c"} Oct 02 11:06:54 crc kubenswrapper[4783]: I1002 11:06:54.140273 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-5f5d66fd8f-66gkw" event={"ID":"15483b1b-86dd-4e47-ae95-9a6afa7cba44","Type":"ContainerStarted","Data":"9b87a2531e47c00d50d2c135ae4389e4a0380d3ea8c2bf89930b9692ef623338"} Oct 02 11:06:54 crc kubenswrapper[4783]: I1002 11:06:54.140303 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-5f5d66fd8f-66gkw" event={"ID":"15483b1b-86dd-4e47-ae95-9a6afa7cba44","Type":"ContainerStarted","Data":"d46727b7b951ddfd9e0dbcaa0c2b29133e3e71f87780b49c7d663c8708be5963"} Oct 02 11:06:54 crc kubenswrapper[4783]: I1002 11:06:54.166595 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-5f5d66fd8f-66gkw" podStartSLOduration=2.166575421 podStartE2EDuration="2.166575421s" podCreationTimestamp="2025-10-02 11:06:52 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:06:54.162465718 +0000 UTC m=+847.478659999" watchObservedRunningTime="2025-10-02 11:06:54.166575421 +0000 UTC m=+847.482769682" Oct 02 11:06:56 crc kubenswrapper[4783]: I1002 11:06:56.161903 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-6cdbc54649-f8h72" event={"ID":"051f4c7b-ead6-4bd5-89d4-b64cf47f24d5","Type":"ContainerStarted","Data":"dc60ffc156e7093a5cbc6d91d437156ffab08447e4ef135daa5cd3626c161752"} Oct 02 11:06:56 crc kubenswrapper[4783]: I1002 11:06:56.162364 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-6cdbc54649-f8h72" Oct 02 11:06:56 crc 
Oct 02 11:06:56 crc kubenswrapper[4783]: I1002 11:06:56.165956 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-9kdkg" event={"ID":"d95fff49-fc1c-4c9c-af21-f6058ede50d5","Type":"ContainerStarted","Data":"f9e84454847c8da21d09c5d6d2901562bfaca79314df3f07dfdd6a25b597dbfe"}
Oct 02 11:06:56 crc kubenswrapper[4783]: I1002 11:06:56.166081 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-9kdkg"
Oct 02 11:06:56 crc kubenswrapper[4783]: I1002 11:06:56.166962 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-fdff9cb8d-m8ljg" event={"ID":"dd90f287-398b-473a-b421-57a85abdca9a","Type":"ContainerStarted","Data":"2e6881b2e67162ecfd168fe1eb2d1d74531b8b758bdfb3645383da3713c1f7ac"}
Oct 02 11:06:56 crc kubenswrapper[4783]: I1002 11:06:56.183837 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-6cdbc54649-f8h72" podStartSLOduration=1.940971544 podStartE2EDuration="4.183817736s" podCreationTimestamp="2025-10-02 11:06:52 +0000 UTC" firstStartedPulling="2025-10-02 11:06:53.421886621 +0000 UTC m=+846.738080892" lastFinishedPulling="2025-10-02 11:06:55.664732813 +0000 UTC m=+848.980927084" observedRunningTime="2025-10-02 11:06:56.178300184 +0000 UTC m=+849.494494475" watchObservedRunningTime="2025-10-02 11:06:56.183817736 +0000 UTC m=+849.500012017"
Oct 02 11:06:56 crc kubenswrapper[4783]: I1002 11:06:56.195126 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-9kdkg" podStartSLOduration=1.610719035 podStartE2EDuration="4.195107917s" podCreationTimestamp="2025-10-02 11:06:52 +0000 UTC" firstStartedPulling="2025-10-02 11:06:52.986083693 +0000 UTC m=+846.302277954" lastFinishedPulling="2025-10-02 11:06:55.570472575 +0000 UTC m=+848.886666836" observedRunningTime="2025-10-02 11:06:56.19377628 +0000 UTC m=+849.509970561" watchObservedRunningTime="2025-10-02 11:06:56.195107917 +0000 UTC m=+849.511302178"
Oct 02 11:06:57 crc kubenswrapper[4783]: I1002 11:06:57.175266 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-5b9kv" event={"ID":"d206e83f-c7d7-4a26-961b-7649d60646a0","Type":"ContainerStarted","Data":"6ce063f86f5e4c23d420d57aa1423a81ac84632362b3b884cfbd900b670d7692"}
Oct 02 11:06:57 crc kubenswrapper[4783]: I1002 11:06:57.200082 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-6b874cbd85-5b9kv" podStartSLOduration=2.6129273 podStartE2EDuration="5.200059787s" podCreationTimestamp="2025-10-02 11:06:52 +0000 UTC" firstStartedPulling="2025-10-02 11:06:54.027998293 +0000 UTC m=+847.344192554" lastFinishedPulling="2025-10-02 11:06:56.61513078 +0000 UTC m=+849.931325041" observedRunningTime="2025-10-02 11:06:57.197026384 +0000 UTC m=+850.513220715" watchObservedRunningTime="2025-10-02 11:06:57.200059787 +0000 UTC m=+850.516254048"
Oct 02 11:06:58 crc kubenswrapper[4783]: I1002 11:06:58.184455 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-fdff9cb8d-m8ljg" event={"ID":"dd90f287-398b-473a-b421-57a85abdca9a","Type":"ContainerStarted","Data":"95b99ddfbc209cbfd64fb3fb9c2657385442bcbbf3162a98c8f3ddc7244b4bad"}
Oct 02 11:06:59 crc kubenswrapper[4783]: I1002 11:06:59.208918 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-fdff9cb8d-m8ljg" podStartSLOduration=2.72433886 podStartE2EDuration="7.208895081s" podCreationTimestamp="2025-10-02 11:06:52 +0000 UTC" firstStartedPulling="2025-10-02 11:06:53.375616556 +0000 UTC m=+846.691810817" lastFinishedPulling="2025-10-02 11:06:57.860172767 +0000 UTC m=+851.176367038" observedRunningTime="2025-10-02 11:06:59.205122597 +0000 UTC m=+852.521316918" watchObservedRunningTime="2025-10-02 11:06:59.208895081 +0000 UTC m=+852.525089342"
Oct 02 11:07:02 crc kubenswrapper[4783]: I1002 11:07:02.937700 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-9kdkg"
Oct 02 11:07:03 crc kubenswrapper[4783]: I1002 11:07:03.249924 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-5f5d66fd8f-66gkw"
Oct 02 11:07:03 crc kubenswrapper[4783]: I1002 11:07:03.249988 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-5f5d66fd8f-66gkw"
Oct 02 11:07:03 crc kubenswrapper[4783]: I1002 11:07:03.255770 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-5f5d66fd8f-66gkw"
Oct 02 11:07:03 crc kubenswrapper[4783]: I1002 11:07:03.872117 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-cpnvn"]
Oct 02 11:07:03 crc kubenswrapper[4783]: I1002 11:07:03.873396 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-cpnvn"
Oct 02 11:07:03 crc kubenswrapper[4783]: I1002 11:07:03.898571 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-cpnvn"]
Oct 02 11:07:03 crc kubenswrapper[4783]: I1002 11:07:03.919483 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e64613a-afd1-4feb-bcb3-49dea004a427-catalog-content\") pod \"community-operators-cpnvn\" (UID: \"4e64613a-afd1-4feb-bcb3-49dea004a427\") " pod="openshift-marketplace/community-operators-cpnvn"
Oct 02 11:07:03 crc kubenswrapper[4783]: I1002 11:07:03.919574 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r548c\" (UniqueName: \"kubernetes.io/projected/4e64613a-afd1-4feb-bcb3-49dea004a427-kube-api-access-r548c\") pod \"community-operators-cpnvn\" (UID: \"4e64613a-afd1-4feb-bcb3-49dea004a427\") " pod="openshift-marketplace/community-operators-cpnvn"
Oct 02 11:07:03 crc kubenswrapper[4783]: I1002 11:07:03.919667 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e64613a-afd1-4feb-bcb3-49dea004a427-utilities\") pod \"community-operators-cpnvn\" (UID: \"4e64613a-afd1-4feb-bcb3-49dea004a427\") " pod="openshift-marketplace/community-operators-cpnvn"
Oct 02 11:07:04 crc kubenswrapper[4783]: I1002 11:07:04.020819 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e64613a-afd1-4feb-bcb3-49dea004a427-catalog-content\") pod \"community-operators-cpnvn\" (UID: \"4e64613a-afd1-4feb-bcb3-49dea004a427\") " pod="openshift-marketplace/community-operators-cpnvn"
Oct 02 11:07:04 crc kubenswrapper[4783]: I1002 11:07:04.020880 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r548c\" (UniqueName: \"kubernetes.io/projected/4e64613a-afd1-4feb-bcb3-49dea004a427-kube-api-access-r548c\") pod \"community-operators-cpnvn\" (UID: \"4e64613a-afd1-4feb-bcb3-49dea004a427\") " pod="openshift-marketplace/community-operators-cpnvn"
Oct 02 11:07:04 crc kubenswrapper[4783]: I1002 11:07:04.020949 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e64613a-afd1-4feb-bcb3-49dea004a427-utilities\") pod \"community-operators-cpnvn\" (UID: \"4e64613a-afd1-4feb-bcb3-49dea004a427\") " pod="openshift-marketplace/community-operators-cpnvn"
Oct 02 11:07:04 crc kubenswrapper[4783]: I1002 11:07:04.021425 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e64613a-afd1-4feb-bcb3-49dea004a427-utilities\") pod \"community-operators-cpnvn\" (UID: \"4e64613a-afd1-4feb-bcb3-49dea004a427\") " pod="openshift-marketplace/community-operators-cpnvn"
Oct 02 11:07:04 crc kubenswrapper[4783]: I1002 11:07:04.021457 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e64613a-afd1-4feb-bcb3-49dea004a427-catalog-content\") pod \"community-operators-cpnvn\" (UID: \"4e64613a-afd1-4feb-bcb3-49dea004a427\") " pod="openshift-marketplace/community-operators-cpnvn"
Oct 02 11:07:04 crc kubenswrapper[4783]: I1002 11:07:04.050164 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r548c\" (UniqueName: \"kubernetes.io/projected/4e64613a-afd1-4feb-bcb3-49dea004a427-kube-api-access-r548c\") pod \"community-operators-cpnvn\" (UID: \"4e64613a-afd1-4feb-bcb3-49dea004a427\") " pod="openshift-marketplace/community-operators-cpnvn"
Oct 02 11:07:04 crc kubenswrapper[4783]: I1002 11:07:04.195466 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-cpnvn"
Need to start a new one" pod="openshift-marketplace/community-operators-cpnvn" Oct 02 11:07:04 crc kubenswrapper[4783]: I1002 11:07:04.223046 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-5f5d66fd8f-66gkw" Oct 02 11:07:04 crc kubenswrapper[4783]: I1002 11:07:04.298104 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-vjcp4"] Oct 02 11:07:04 crc kubenswrapper[4783]: I1002 11:07:04.543579 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-cpnvn"] Oct 02 11:07:04 crc kubenswrapper[4783]: W1002 11:07:04.546089 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4e64613a_afd1_4feb_bcb3_49dea004a427.slice/crio-3c9b46645f09897dd994efe187d0dec4972165d3a73c8065f3e825620cd38f3e WatchSource:0}: Error finding container 3c9b46645f09897dd994efe187d0dec4972165d3a73c8065f3e825620cd38f3e: Status 404 returned error can't find the container with id 3c9b46645f09897dd994efe187d0dec4972165d3a73c8065f3e825620cd38f3e Oct 02 11:07:05 crc kubenswrapper[4783]: I1002 11:07:05.225660 4783 generic.go:334] "Generic (PLEG): container finished" podID="4e64613a-afd1-4feb-bcb3-49dea004a427" containerID="b1d143346eab25e1f097b1e8a334314e7d84d52a06324793d953981b6349e504" exitCode=0 Oct 02 11:07:05 crc kubenswrapper[4783]: I1002 11:07:05.225778 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cpnvn" event={"ID":"4e64613a-afd1-4feb-bcb3-49dea004a427","Type":"ContainerDied","Data":"b1d143346eab25e1f097b1e8a334314e7d84d52a06324793d953981b6349e504"} Oct 02 11:07:05 crc kubenswrapper[4783]: I1002 11:07:05.225806 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cpnvn" event={"ID":"4e64613a-afd1-4feb-bcb3-49dea004a427","Type":"ContainerStarted","Data":"3c9b46645f09897dd994efe187d0dec4972165d3a73c8065f3e825620cd38f3e"} Oct 02 11:07:06 crc kubenswrapper[4783]: I1002 11:07:06.231632 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cpnvn" event={"ID":"4e64613a-afd1-4feb-bcb3-49dea004a427","Type":"ContainerStarted","Data":"bbce7bc970c949ce4190ec27824a9efc64d26786ddeb318f419bc38d7b1811c0"} Oct 02 11:07:07 crc kubenswrapper[4783]: I1002 11:07:07.239345 4783 generic.go:334] "Generic (PLEG): container finished" podID="4e64613a-afd1-4feb-bcb3-49dea004a427" containerID="bbce7bc970c949ce4190ec27824a9efc64d26786ddeb318f419bc38d7b1811c0" exitCode=0 Oct 02 11:07:07 crc kubenswrapper[4783]: I1002 11:07:07.239379 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cpnvn" event={"ID":"4e64613a-afd1-4feb-bcb3-49dea004a427","Type":"ContainerDied","Data":"bbce7bc970c949ce4190ec27824a9efc64d26786ddeb318f419bc38d7b1811c0"} Oct 02 11:07:08 crc kubenswrapper[4783]: I1002 11:07:08.051618 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-7v872"] Oct 02 11:07:08 crc kubenswrapper[4783]: I1002 11:07:08.054455 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7v872" Oct 02 11:07:08 crc kubenswrapper[4783]: I1002 11:07:08.070677 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-7v872"] Oct 02 11:07:08 crc kubenswrapper[4783]: I1002 11:07:08.175857 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s65wp\" (UniqueName: \"kubernetes.io/projected/7b4a1225-26ed-41e4-aa91-04e0e4cc70aa-kube-api-access-s65wp\") pod \"certified-operators-7v872\" (UID: \"7b4a1225-26ed-41e4-aa91-04e0e4cc70aa\") " pod="openshift-marketplace/certified-operators-7v872" Oct 02 11:07:08 crc kubenswrapper[4783]: I1002 11:07:08.176012 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7b4a1225-26ed-41e4-aa91-04e0e4cc70aa-utilities\") pod \"certified-operators-7v872\" (UID: \"7b4a1225-26ed-41e4-aa91-04e0e4cc70aa\") " pod="openshift-marketplace/certified-operators-7v872" Oct 02 11:07:08 crc kubenswrapper[4783]: I1002 11:07:08.176230 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7b4a1225-26ed-41e4-aa91-04e0e4cc70aa-catalog-content\") pod \"certified-operators-7v872\" (UID: \"7b4a1225-26ed-41e4-aa91-04e0e4cc70aa\") " pod="openshift-marketplace/certified-operators-7v872" Oct 02 11:07:08 crc kubenswrapper[4783]: I1002 11:07:08.259931 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cpnvn" event={"ID":"4e64613a-afd1-4feb-bcb3-49dea004a427","Type":"ContainerStarted","Data":"6a8057cb196923a6341a84d6e5cda2a6fe02751638ce8d8b6c1401a3c07baf56"} Oct 02 11:07:08 crc kubenswrapper[4783]: I1002 11:07:08.277432 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s65wp\" (UniqueName: \"kubernetes.io/projected/7b4a1225-26ed-41e4-aa91-04e0e4cc70aa-kube-api-access-s65wp\") pod \"certified-operators-7v872\" (UID: \"7b4a1225-26ed-41e4-aa91-04e0e4cc70aa\") " pod="openshift-marketplace/certified-operators-7v872" Oct 02 11:07:08 crc kubenswrapper[4783]: I1002 11:07:08.277483 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7b4a1225-26ed-41e4-aa91-04e0e4cc70aa-utilities\") pod \"certified-operators-7v872\" (UID: \"7b4a1225-26ed-41e4-aa91-04e0e4cc70aa\") " pod="openshift-marketplace/certified-operators-7v872" Oct 02 11:07:08 crc kubenswrapper[4783]: I1002 11:07:08.277514 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7b4a1225-26ed-41e4-aa91-04e0e4cc70aa-catalog-content\") pod \"certified-operators-7v872\" (UID: \"7b4a1225-26ed-41e4-aa91-04e0e4cc70aa\") " pod="openshift-marketplace/certified-operators-7v872" Oct 02 11:07:08 crc kubenswrapper[4783]: I1002 11:07:08.278540 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7b4a1225-26ed-41e4-aa91-04e0e4cc70aa-catalog-content\") pod \"certified-operators-7v872\" (UID: \"7b4a1225-26ed-41e4-aa91-04e0e4cc70aa\") " pod="openshift-marketplace/certified-operators-7v872" Oct 02 11:07:08 crc kubenswrapper[4783]: I1002 11:07:08.279111 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" 
(UniqueName: \"kubernetes.io/empty-dir/7b4a1225-26ed-41e4-aa91-04e0e4cc70aa-utilities\") pod \"certified-operators-7v872\" (UID: \"7b4a1225-26ed-41e4-aa91-04e0e4cc70aa\") " pod="openshift-marketplace/certified-operators-7v872" Oct 02 11:07:08 crc kubenswrapper[4783]: I1002 11:07:08.281458 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-cpnvn" podStartSLOduration=2.7100122779999998 podStartE2EDuration="5.281448834s" podCreationTimestamp="2025-10-02 11:07:03 +0000 UTC" firstStartedPulling="2025-10-02 11:07:05.227548084 +0000 UTC m=+858.543742355" lastFinishedPulling="2025-10-02 11:07:07.79898462 +0000 UTC m=+861.115178911" observedRunningTime="2025-10-02 11:07:08.279206292 +0000 UTC m=+861.595400553" watchObservedRunningTime="2025-10-02 11:07:08.281448834 +0000 UTC m=+861.597643095" Oct 02 11:07:08 crc kubenswrapper[4783]: I1002 11:07:08.305816 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s65wp\" (UniqueName: \"kubernetes.io/projected/7b4a1225-26ed-41e4-aa91-04e0e4cc70aa-kube-api-access-s65wp\") pod \"certified-operators-7v872\" (UID: \"7b4a1225-26ed-41e4-aa91-04e0e4cc70aa\") " pod="openshift-marketplace/certified-operators-7v872" Oct 02 11:07:08 crc kubenswrapper[4783]: I1002 11:07:08.373110 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7v872" Oct 02 11:07:08 crc kubenswrapper[4783]: I1002 11:07:08.687788 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-7v872"] Oct 02 11:07:09 crc kubenswrapper[4783]: I1002 11:07:09.265790 4783 generic.go:334] "Generic (PLEG): container finished" podID="7b4a1225-26ed-41e4-aa91-04e0e4cc70aa" containerID="6e47772ae5e9cdc94e881e56f77eb6ee831595c13f9ef7e46c032fd97179b91d" exitCode=0 Oct 02 11:07:09 crc kubenswrapper[4783]: I1002 11:07:09.265902 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7v872" event={"ID":"7b4a1225-26ed-41e4-aa91-04e0e4cc70aa","Type":"ContainerDied","Data":"6e47772ae5e9cdc94e881e56f77eb6ee831595c13f9ef7e46c032fd97179b91d"} Oct 02 11:07:09 crc kubenswrapper[4783]: I1002 11:07:09.266096 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7v872" event={"ID":"7b4a1225-26ed-41e4-aa91-04e0e4cc70aa","Type":"ContainerStarted","Data":"6ec9cbf938a4f8ccfc38c2e36fa4b6a4ae5d510b057e89ddaf22c4c293182570"} Oct 02 11:07:11 crc kubenswrapper[4783]: I1002 11:07:11.286751 4783 generic.go:334] "Generic (PLEG): container finished" podID="7b4a1225-26ed-41e4-aa91-04e0e4cc70aa" containerID="a5797e66295d8c327d7baa79fd3f16bdb4a77b4c6b4518641a9bcb673ae4311a" exitCode=0 Oct 02 11:07:11 crc kubenswrapper[4783]: I1002 11:07:11.286805 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7v872" event={"ID":"7b4a1225-26ed-41e4-aa91-04e0e4cc70aa","Type":"ContainerDied","Data":"a5797e66295d8c327d7baa79fd3f16bdb4a77b4c6b4518641a9bcb673ae4311a"} Oct 02 11:07:12 crc kubenswrapper[4783]: I1002 11:07:12.296902 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7v872" event={"ID":"7b4a1225-26ed-41e4-aa91-04e0e4cc70aa","Type":"ContainerStarted","Data":"52407e89ba57f2007f71c4612d874c9c8b6f2ea18c7a7f78d9c661824da08670"} Oct 02 11:07:12 crc kubenswrapper[4783]: I1002 11:07:12.882098 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openshift-nmstate/nmstate-webhook-6cdbc54649-f8h72" Oct 02 11:07:12 crc kubenswrapper[4783]: I1002 11:07:12.900151 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-7v872" podStartSLOduration=2.130251827 podStartE2EDuration="4.90013097s" podCreationTimestamp="2025-10-02 11:07:08 +0000 UTC" firstStartedPulling="2025-10-02 11:07:09.26771381 +0000 UTC m=+862.583908071" lastFinishedPulling="2025-10-02 11:07:12.037592953 +0000 UTC m=+865.353787214" observedRunningTime="2025-10-02 11:07:12.315227643 +0000 UTC m=+865.631421914" watchObservedRunningTime="2025-10-02 11:07:12.90013097 +0000 UTC m=+866.216325231" Oct 02 11:07:14 crc kubenswrapper[4783]: I1002 11:07:14.195703 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-cpnvn" Oct 02 11:07:14 crc kubenswrapper[4783]: I1002 11:07:14.195769 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-cpnvn" Oct 02 11:07:14 crc kubenswrapper[4783]: I1002 11:07:14.247354 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-cpnvn" Oct 02 11:07:14 crc kubenswrapper[4783]: I1002 11:07:14.352381 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-cpnvn" Oct 02 11:07:15 crc kubenswrapper[4783]: I1002 11:07:15.442242 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-cpnvn"] Oct 02 11:07:16 crc kubenswrapper[4783]: I1002 11:07:16.317692 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-cpnvn" podUID="4e64613a-afd1-4feb-bcb3-49dea004a427" containerName="registry-server" containerID="cri-o://6a8057cb196923a6341a84d6e5cda2a6fe02751638ce8d8b6c1401a3c07baf56" gracePeriod=2 Oct 02 11:07:16 crc kubenswrapper[4783]: I1002 11:07:16.672545 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-cpnvn" Oct 02 11:07:16 crc kubenswrapper[4783]: I1002 11:07:16.794147 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e64613a-afd1-4feb-bcb3-49dea004a427-catalog-content\") pod \"4e64613a-afd1-4feb-bcb3-49dea004a427\" (UID: \"4e64613a-afd1-4feb-bcb3-49dea004a427\") " Oct 02 11:07:16 crc kubenswrapper[4783]: I1002 11:07:16.794217 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r548c\" (UniqueName: \"kubernetes.io/projected/4e64613a-afd1-4feb-bcb3-49dea004a427-kube-api-access-r548c\") pod \"4e64613a-afd1-4feb-bcb3-49dea004a427\" (UID: \"4e64613a-afd1-4feb-bcb3-49dea004a427\") " Oct 02 11:07:16 crc kubenswrapper[4783]: I1002 11:07:16.794247 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e64613a-afd1-4feb-bcb3-49dea004a427-utilities\") pod \"4e64613a-afd1-4feb-bcb3-49dea004a427\" (UID: \"4e64613a-afd1-4feb-bcb3-49dea004a427\") " Oct 02 11:07:16 crc kubenswrapper[4783]: I1002 11:07:16.795332 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4e64613a-afd1-4feb-bcb3-49dea004a427-utilities" (OuterVolumeSpecName: "utilities") pod "4e64613a-afd1-4feb-bcb3-49dea004a427" (UID: "4e64613a-afd1-4feb-bcb3-49dea004a427"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:07:16 crc kubenswrapper[4783]: I1002 11:07:16.802950 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4e64613a-afd1-4feb-bcb3-49dea004a427-kube-api-access-r548c" (OuterVolumeSpecName: "kube-api-access-r548c") pod "4e64613a-afd1-4feb-bcb3-49dea004a427" (UID: "4e64613a-afd1-4feb-bcb3-49dea004a427"). InnerVolumeSpecName "kube-api-access-r548c". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:07:16 crc kubenswrapper[4783]: I1002 11:07:16.849473 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4e64613a-afd1-4feb-bcb3-49dea004a427-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4e64613a-afd1-4feb-bcb3-49dea004a427" (UID: "4e64613a-afd1-4feb-bcb3-49dea004a427"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:07:16 crc kubenswrapper[4783]: I1002 11:07:16.895944 4783 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4e64613a-afd1-4feb-bcb3-49dea004a427-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 02 11:07:16 crc kubenswrapper[4783]: I1002 11:07:16.896002 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r548c\" (UniqueName: \"kubernetes.io/projected/4e64613a-afd1-4feb-bcb3-49dea004a427-kube-api-access-r548c\") on node \"crc\" DevicePath \"\"" Oct 02 11:07:16 crc kubenswrapper[4783]: I1002 11:07:16.896023 4783 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4e64613a-afd1-4feb-bcb3-49dea004a427-utilities\") on node \"crc\" DevicePath \"\"" Oct 02 11:07:17 crc kubenswrapper[4783]: I1002 11:07:17.327062 4783 generic.go:334] "Generic (PLEG): container finished" podID="4e64613a-afd1-4feb-bcb3-49dea004a427" containerID="6a8057cb196923a6341a84d6e5cda2a6fe02751638ce8d8b6c1401a3c07baf56" exitCode=0 Oct 02 11:07:17 crc kubenswrapper[4783]: I1002 11:07:17.327115 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cpnvn" event={"ID":"4e64613a-afd1-4feb-bcb3-49dea004a427","Type":"ContainerDied","Data":"6a8057cb196923a6341a84d6e5cda2a6fe02751638ce8d8b6c1401a3c07baf56"} Oct 02 11:07:17 crc kubenswrapper[4783]: I1002 11:07:17.327163 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-cpnvn" Oct 02 11:07:17 crc kubenswrapper[4783]: I1002 11:07:17.327189 4783 scope.go:117] "RemoveContainer" containerID="6a8057cb196923a6341a84d6e5cda2a6fe02751638ce8d8b6c1401a3c07baf56" Oct 02 11:07:17 crc kubenswrapper[4783]: I1002 11:07:17.327170 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cpnvn" event={"ID":"4e64613a-afd1-4feb-bcb3-49dea004a427","Type":"ContainerDied","Data":"3c9b46645f09897dd994efe187d0dec4972165d3a73c8065f3e825620cd38f3e"} Oct 02 11:07:17 crc kubenswrapper[4783]: I1002 11:07:17.348850 4783 scope.go:117] "RemoveContainer" containerID="bbce7bc970c949ce4190ec27824a9efc64d26786ddeb318f419bc38d7b1811c0" Oct 02 11:07:17 crc kubenswrapper[4783]: I1002 11:07:17.362273 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-cpnvn"] Oct 02 11:07:17 crc kubenswrapper[4783]: I1002 11:07:17.369465 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-cpnvn"] Oct 02 11:07:17 crc kubenswrapper[4783]: I1002 11:07:17.383040 4783 scope.go:117] "RemoveContainer" containerID="b1d143346eab25e1f097b1e8a334314e7d84d52a06324793d953981b6349e504" Oct 02 11:07:17 crc kubenswrapper[4783]: I1002 11:07:17.401626 4783 scope.go:117] "RemoveContainer" containerID="6a8057cb196923a6341a84d6e5cda2a6fe02751638ce8d8b6c1401a3c07baf56" Oct 02 11:07:17 crc kubenswrapper[4783]: E1002 11:07:17.406030 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6a8057cb196923a6341a84d6e5cda2a6fe02751638ce8d8b6c1401a3c07baf56\": container with ID starting with 6a8057cb196923a6341a84d6e5cda2a6fe02751638ce8d8b6c1401a3c07baf56 not found: ID does not exist" containerID="6a8057cb196923a6341a84d6e5cda2a6fe02751638ce8d8b6c1401a3c07baf56" Oct 02 11:07:17 crc kubenswrapper[4783]: I1002 11:07:17.406091 
Oct 02 11:07:17 crc kubenswrapper[4783]: I1002 11:07:17.406123 4783 scope.go:117] "RemoveContainer" containerID="bbce7bc970c949ce4190ec27824a9efc64d26786ddeb318f419bc38d7b1811c0"
Oct 02 11:07:17 crc kubenswrapper[4783]: E1002 11:07:17.407699 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bbce7bc970c949ce4190ec27824a9efc64d26786ddeb318f419bc38d7b1811c0\": container with ID starting with bbce7bc970c949ce4190ec27824a9efc64d26786ddeb318f419bc38d7b1811c0 not found: ID does not exist" containerID="bbce7bc970c949ce4190ec27824a9efc64d26786ddeb318f419bc38d7b1811c0"
Oct 02 11:07:17 crc kubenswrapper[4783]: I1002 11:07:17.407733 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bbce7bc970c949ce4190ec27824a9efc64d26786ddeb318f419bc38d7b1811c0"} err="failed to get container status \"bbce7bc970c949ce4190ec27824a9efc64d26786ddeb318f419bc38d7b1811c0\": rpc error: code = NotFound desc = could not find container \"bbce7bc970c949ce4190ec27824a9efc64d26786ddeb318f419bc38d7b1811c0\": container with ID starting with bbce7bc970c949ce4190ec27824a9efc64d26786ddeb318f419bc38d7b1811c0 not found: ID does not exist"
Oct 02 11:07:17 crc kubenswrapper[4783]: I1002 11:07:17.407758 4783 scope.go:117] "RemoveContainer" containerID="b1d143346eab25e1f097b1e8a334314e7d84d52a06324793d953981b6349e504"
Oct 02 11:07:17 crc kubenswrapper[4783]: E1002 11:07:17.409111 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b1d143346eab25e1f097b1e8a334314e7d84d52a06324793d953981b6349e504\": container with ID starting with b1d143346eab25e1f097b1e8a334314e7d84d52a06324793d953981b6349e504 not found: ID does not exist" containerID="b1d143346eab25e1f097b1e8a334314e7d84d52a06324793d953981b6349e504"
Oct 02 11:07:17 crc kubenswrapper[4783]: I1002 11:07:17.409191 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b1d143346eab25e1f097b1e8a334314e7d84d52a06324793d953981b6349e504"} err="failed to get container status \"b1d143346eab25e1f097b1e8a334314e7d84d52a06324793d953981b6349e504\": rpc error: code = NotFound desc = could not find container \"b1d143346eab25e1f097b1e8a334314e7d84d52a06324793d953981b6349e504\": container with ID starting with b1d143346eab25e1f097b1e8a334314e7d84d52a06324793d953981b6349e504 not found: ID does not exist"
Oct 02 11:07:17 crc kubenswrapper[4783]: I1002 11:07:17.555672 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4e64613a-afd1-4feb-bcb3-49dea004a427" path="/var/lib/kubelet/pods/4e64613a-afd1-4feb-bcb3-49dea004a427/volumes"
Oct 02 11:07:18 crc kubenswrapper[4783]: I1002 11:07:18.373816 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-7v872"
Oct 02 11:07:18 crc kubenswrapper[4783]: I1002 11:07:18.374123 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-7v872"
Oct 02 11:07:18 crc kubenswrapper[4783]: I1002 11:07:18.414534 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-7v872"
Oct 02 11:07:19 crc kubenswrapper[4783]: I1002 11:07:19.408002 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-7v872"
Oct 02 11:07:20 crc kubenswrapper[4783]: I1002 11:07:20.643619 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-7v872"]
Oct 02 11:07:21 crc kubenswrapper[4783]: I1002 11:07:21.250048 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-pl44m"]
Oct 02 11:07:21 crc kubenswrapper[4783]: E1002 11:07:21.250542 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e64613a-afd1-4feb-bcb3-49dea004a427" containerName="extract-utilities"
Oct 02 11:07:21 crc kubenswrapper[4783]: I1002 11:07:21.250554 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e64613a-afd1-4feb-bcb3-49dea004a427" containerName="extract-utilities"
Oct 02 11:07:21 crc kubenswrapper[4783]: E1002 11:07:21.250568 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e64613a-afd1-4feb-bcb3-49dea004a427" containerName="registry-server"
Oct 02 11:07:21 crc kubenswrapper[4783]: I1002 11:07:21.250574 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e64613a-afd1-4feb-bcb3-49dea004a427" containerName="registry-server"
Oct 02 11:07:21 crc kubenswrapper[4783]: E1002 11:07:21.250585 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e64613a-afd1-4feb-bcb3-49dea004a427" containerName="extract-content"
Oct 02 11:07:21 crc kubenswrapper[4783]: I1002 11:07:21.250591 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e64613a-afd1-4feb-bcb3-49dea004a427" containerName="extract-content"
Oct 02 11:07:21 crc kubenswrapper[4783]: I1002 11:07:21.250696 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="4e64613a-afd1-4feb-bcb3-49dea004a427" containerName="registry-server"
Oct 02 11:07:21 crc kubenswrapper[4783]: I1002 11:07:21.251386 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pl44m"
Oct 02 11:07:21 crc kubenswrapper[4783]: I1002 11:07:21.259767 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-pl44m"]
Oct 02 11:07:21 crc kubenswrapper[4783]: I1002 11:07:21.350161 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-7v872" podUID="7b4a1225-26ed-41e4-aa91-04e0e4cc70aa" containerName="registry-server" containerID="cri-o://52407e89ba57f2007f71c4612d874c9c8b6f2ea18c7a7f78d9c661824da08670" gracePeriod=2
Oct 02 11:07:21 crc kubenswrapper[4783]: I1002 11:07:21.359580 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5dzg6\" (UniqueName: \"kubernetes.io/projected/e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f-kube-api-access-5dzg6\") pod \"redhat-marketplace-pl44m\" (UID: \"e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f\") " pod="openshift-marketplace/redhat-marketplace-pl44m"
Oct 02 11:07:21 crc kubenswrapper[4783]: I1002 11:07:21.359629 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f-catalog-content\") pod \"redhat-marketplace-pl44m\" (UID: \"e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f\") " pod="openshift-marketplace/redhat-marketplace-pl44m"
Oct 02 11:07:21 crc kubenswrapper[4783]: I1002 11:07:21.359686 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f-utilities\") pod \"redhat-marketplace-pl44m\" (UID: \"e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f\") " pod="openshift-marketplace/redhat-marketplace-pl44m"
Oct 02 11:07:21 crc kubenswrapper[4783]: I1002 11:07:21.472993 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f-utilities\") pod \"redhat-marketplace-pl44m\" (UID: \"e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f\") " pod="openshift-marketplace/redhat-marketplace-pl44m"
Oct 02 11:07:21 crc kubenswrapper[4783]: I1002 11:07:21.473100 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5dzg6\" (UniqueName: \"kubernetes.io/projected/e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f-kube-api-access-5dzg6\") pod \"redhat-marketplace-pl44m\" (UID: \"e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f\") " pod="openshift-marketplace/redhat-marketplace-pl44m"
Oct 02 11:07:21 crc kubenswrapper[4783]: I1002 11:07:21.473137 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f-catalog-content\") pod \"redhat-marketplace-pl44m\" (UID: \"e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f\") " pod="openshift-marketplace/redhat-marketplace-pl44m"
Oct 02 11:07:21 crc kubenswrapper[4783]: I1002 11:07:21.473673 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f-catalog-content\") pod \"redhat-marketplace-pl44m\" (UID: \"e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f\") " pod="openshift-marketplace/redhat-marketplace-pl44m"
Oct 02 11:07:21 crc kubenswrapper[4783]: I1002 11:07:21.475852 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f-utilities\") pod \"redhat-marketplace-pl44m\" (UID: \"e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f\") " pod="openshift-marketplace/redhat-marketplace-pl44m"
succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f-utilities\") pod \"redhat-marketplace-pl44m\" (UID: \"e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f\") " pod="openshift-marketplace/redhat-marketplace-pl44m" Oct 02 11:07:21 crc kubenswrapper[4783]: I1002 11:07:21.503551 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5dzg6\" (UniqueName: \"kubernetes.io/projected/e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f-kube-api-access-5dzg6\") pod \"redhat-marketplace-pl44m\" (UID: \"e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f\") " pod="openshift-marketplace/redhat-marketplace-pl44m" Oct 02 11:07:21 crc kubenswrapper[4783]: I1002 11:07:21.591525 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pl44m" Oct 02 11:07:22 crc kubenswrapper[4783]: I1002 11:07:22.000860 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-pl44m"] Oct 02 11:07:22 crc kubenswrapper[4783]: I1002 11:07:22.238086 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7v872" Oct 02 11:07:22 crc kubenswrapper[4783]: I1002 11:07:22.287523 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7b4a1225-26ed-41e4-aa91-04e0e4cc70aa-catalog-content\") pod \"7b4a1225-26ed-41e4-aa91-04e0e4cc70aa\" (UID: \"7b4a1225-26ed-41e4-aa91-04e0e4cc70aa\") " Oct 02 11:07:22 crc kubenswrapper[4783]: I1002 11:07:22.287603 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7b4a1225-26ed-41e4-aa91-04e0e4cc70aa-utilities\") pod \"7b4a1225-26ed-41e4-aa91-04e0e4cc70aa\" (UID: \"7b4a1225-26ed-41e4-aa91-04e0e4cc70aa\") " Oct 02 11:07:22 crc kubenswrapper[4783]: I1002 11:07:22.287687 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s65wp\" (UniqueName: \"kubernetes.io/projected/7b4a1225-26ed-41e4-aa91-04e0e4cc70aa-kube-api-access-s65wp\") pod \"7b4a1225-26ed-41e4-aa91-04e0e4cc70aa\" (UID: \"7b4a1225-26ed-41e4-aa91-04e0e4cc70aa\") " Oct 02 11:07:22 crc kubenswrapper[4783]: I1002 11:07:22.288831 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7b4a1225-26ed-41e4-aa91-04e0e4cc70aa-utilities" (OuterVolumeSpecName: "utilities") pod "7b4a1225-26ed-41e4-aa91-04e0e4cc70aa" (UID: "7b4a1225-26ed-41e4-aa91-04e0e4cc70aa"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:07:22 crc kubenswrapper[4783]: I1002 11:07:22.292939 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7b4a1225-26ed-41e4-aa91-04e0e4cc70aa-kube-api-access-s65wp" (OuterVolumeSpecName: "kube-api-access-s65wp") pod "7b4a1225-26ed-41e4-aa91-04e0e4cc70aa" (UID: "7b4a1225-26ed-41e4-aa91-04e0e4cc70aa"). InnerVolumeSpecName "kube-api-access-s65wp". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:07:22 crc kubenswrapper[4783]: I1002 11:07:22.330360 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7b4a1225-26ed-41e4-aa91-04e0e4cc70aa-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7b4a1225-26ed-41e4-aa91-04e0e4cc70aa" (UID: "7b4a1225-26ed-41e4-aa91-04e0e4cc70aa"). 
InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:07:22 crc kubenswrapper[4783]: I1002 11:07:22.357582 4783 generic.go:334] "Generic (PLEG): container finished" podID="e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f" containerID="fc981d6c82f4fbb88dbbff5f3c077334c0c5d925f25db5c84073962afdeb8836" exitCode=0 Oct 02 11:07:22 crc kubenswrapper[4783]: I1002 11:07:22.357654 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pl44m" event={"ID":"e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f","Type":"ContainerDied","Data":"fc981d6c82f4fbb88dbbff5f3c077334c0c5d925f25db5c84073962afdeb8836"} Oct 02 11:07:22 crc kubenswrapper[4783]: I1002 11:07:22.357873 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pl44m" event={"ID":"e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f","Type":"ContainerStarted","Data":"3e73c332892aa8ab3f20f16b409abb3b70468a743a0f69fd9f4d76b327b53052"} Oct 02 11:07:22 crc kubenswrapper[4783]: I1002 11:07:22.361819 4783 generic.go:334] "Generic (PLEG): container finished" podID="7b4a1225-26ed-41e4-aa91-04e0e4cc70aa" containerID="52407e89ba57f2007f71c4612d874c9c8b6f2ea18c7a7f78d9c661824da08670" exitCode=0 Oct 02 11:07:22 crc kubenswrapper[4783]: I1002 11:07:22.361860 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7v872" event={"ID":"7b4a1225-26ed-41e4-aa91-04e0e4cc70aa","Type":"ContainerDied","Data":"52407e89ba57f2007f71c4612d874c9c8b6f2ea18c7a7f78d9c661824da08670"} Oct 02 11:07:22 crc kubenswrapper[4783]: I1002 11:07:22.361886 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7v872" event={"ID":"7b4a1225-26ed-41e4-aa91-04e0e4cc70aa","Type":"ContainerDied","Data":"6ec9cbf938a4f8ccfc38c2e36fa4b6a4ae5d510b057e89ddaf22c4c293182570"} Oct 02 11:07:22 crc kubenswrapper[4783]: I1002 11:07:22.361903 4783 scope.go:117] "RemoveContainer" containerID="52407e89ba57f2007f71c4612d874c9c8b6f2ea18c7a7f78d9c661824da08670" Oct 02 11:07:22 crc kubenswrapper[4783]: I1002 11:07:22.362054 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7v872" Oct 02 11:07:22 crc kubenswrapper[4783]: I1002 11:07:22.391013 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s65wp\" (UniqueName: \"kubernetes.io/projected/7b4a1225-26ed-41e4-aa91-04e0e4cc70aa-kube-api-access-s65wp\") on node \"crc\" DevicePath \"\"" Oct 02 11:07:22 crc kubenswrapper[4783]: I1002 11:07:22.391055 4783 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7b4a1225-26ed-41e4-aa91-04e0e4cc70aa-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 02 11:07:22 crc kubenswrapper[4783]: I1002 11:07:22.391068 4783 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7b4a1225-26ed-41e4-aa91-04e0e4cc70aa-utilities\") on node \"crc\" DevicePath \"\"" Oct 02 11:07:22 crc kubenswrapper[4783]: I1002 11:07:22.391208 4783 scope.go:117] "RemoveContainer" containerID="a5797e66295d8c327d7baa79fd3f16bdb4a77b4c6b4518641a9bcb673ae4311a" Oct 02 11:07:22 crc kubenswrapper[4783]: I1002 11:07:22.398450 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-7v872"] Oct 02 11:07:22 crc kubenswrapper[4783]: I1002 11:07:22.401886 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-7v872"] Oct 02 11:07:22 crc kubenswrapper[4783]: I1002 11:07:22.420580 4783 scope.go:117] "RemoveContainer" containerID="6e47772ae5e9cdc94e881e56f77eb6ee831595c13f9ef7e46c032fd97179b91d" Oct 02 11:07:22 crc kubenswrapper[4783]: I1002 11:07:22.440532 4783 scope.go:117] "RemoveContainer" containerID="52407e89ba57f2007f71c4612d874c9c8b6f2ea18c7a7f78d9c661824da08670" Oct 02 11:07:22 crc kubenswrapper[4783]: E1002 11:07:22.440906 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"52407e89ba57f2007f71c4612d874c9c8b6f2ea18c7a7f78d9c661824da08670\": container with ID starting with 52407e89ba57f2007f71c4612d874c9c8b6f2ea18c7a7f78d9c661824da08670 not found: ID does not exist" containerID="52407e89ba57f2007f71c4612d874c9c8b6f2ea18c7a7f78d9c661824da08670" Oct 02 11:07:22 crc kubenswrapper[4783]: I1002 11:07:22.440941 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"52407e89ba57f2007f71c4612d874c9c8b6f2ea18c7a7f78d9c661824da08670"} err="failed to get container status \"52407e89ba57f2007f71c4612d874c9c8b6f2ea18c7a7f78d9c661824da08670\": rpc error: code = NotFound desc = could not find container \"52407e89ba57f2007f71c4612d874c9c8b6f2ea18c7a7f78d9c661824da08670\": container with ID starting with 52407e89ba57f2007f71c4612d874c9c8b6f2ea18c7a7f78d9c661824da08670 not found: ID does not exist" Oct 02 11:07:22 crc kubenswrapper[4783]: I1002 11:07:22.440969 4783 scope.go:117] "RemoveContainer" containerID="a5797e66295d8c327d7baa79fd3f16bdb4a77b4c6b4518641a9bcb673ae4311a" Oct 02 11:07:22 crc kubenswrapper[4783]: E1002 11:07:22.441234 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a5797e66295d8c327d7baa79fd3f16bdb4a77b4c6b4518641a9bcb673ae4311a\": container with ID starting with a5797e66295d8c327d7baa79fd3f16bdb4a77b4c6b4518641a9bcb673ae4311a not found: ID does not exist" containerID="a5797e66295d8c327d7baa79fd3f16bdb4a77b4c6b4518641a9bcb673ae4311a" Oct 02 11:07:22 crc kubenswrapper[4783]: I1002 11:07:22.441353 4783 
Oct 02 11:07:22 crc kubenswrapper[4783]: I1002 11:07:22.441499 4783 scope.go:117] "RemoveContainer" containerID="6e47772ae5e9cdc94e881e56f77eb6ee831595c13f9ef7e46c032fd97179b91d"
Oct 02 11:07:22 crc kubenswrapper[4783]: E1002 11:07:22.442000 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6e47772ae5e9cdc94e881e56f77eb6ee831595c13f9ef7e46c032fd97179b91d\": container with ID starting with 6e47772ae5e9cdc94e881e56f77eb6ee831595c13f9ef7e46c032fd97179b91d not found: ID does not exist" containerID="6e47772ae5e9cdc94e881e56f77eb6ee831595c13f9ef7e46c032fd97179b91d"
Oct 02 11:07:22 crc kubenswrapper[4783]: I1002 11:07:22.442034 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e47772ae5e9cdc94e881e56f77eb6ee831595c13f9ef7e46c032fd97179b91d"} err="failed to get container status \"6e47772ae5e9cdc94e881e56f77eb6ee831595c13f9ef7e46c032fd97179b91d\": rpc error: code = NotFound desc = could not find container \"6e47772ae5e9cdc94e881e56f77eb6ee831595c13f9ef7e46c032fd97179b91d\": container with ID starting with 6e47772ae5e9cdc94e881e56f77eb6ee831595c13f9ef7e46c032fd97179b91d not found: ID does not exist"
Oct 02 11:07:23 crc kubenswrapper[4783]: I1002 11:07:23.374443 4783 generic.go:334] "Generic (PLEG): container finished" podID="e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f" containerID="6a31dc78616ff45a6590a3bc3a944c3f0a78dbc9aa07178c3528baeb58fca31d" exitCode=0
Oct 02 11:07:23 crc kubenswrapper[4783]: I1002 11:07:23.374861 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pl44m" event={"ID":"e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f","Type":"ContainerDied","Data":"6a31dc78616ff45a6590a3bc3a944c3f0a78dbc9aa07178c3528baeb58fca31d"}
Oct 02 11:07:23 crc kubenswrapper[4783]: I1002 11:07:23.554090 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7b4a1225-26ed-41e4-aa91-04e0e4cc70aa" path="/var/lib/kubelet/pods/7b4a1225-26ed-41e4-aa91-04e0e4cc70aa/volumes"
Oct 02 11:07:24 crc kubenswrapper[4783]: I1002 11:07:24.382626 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pl44m" event={"ID":"e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f","Type":"ContainerStarted","Data":"66c685ee6d709fb332162168296f1e4c415208060541cb1f61c1591419e04c47"}
Oct 02 11:07:24 crc kubenswrapper[4783]: I1002 11:07:24.406567 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-pl44m" podStartSLOduration=1.889011679 podStartE2EDuration="3.406548774s" podCreationTimestamp="2025-10-02 11:07:21 +0000 UTC" firstStartedPulling="2025-10-02 11:07:22.359860188 +0000 UTC m=+875.676054449" lastFinishedPulling="2025-10-02 11:07:23.877397273 +0000 UTC m=+877.193591544" observedRunningTime="2025-10-02 11:07:24.402587145 +0000 UTC m=+877.718781406" watchObservedRunningTime="2025-10-02 11:07:24.406548774 +0000 UTC m=+877.722743035"
Oct 02 11:07:26 crc kubenswrapper[4783]: I1002 11:07:26.876748 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26bw7m"]
Oct 02 11:07:26 crc kubenswrapper[4783]: E1002 11:07:26.877614 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b4a1225-26ed-41e4-aa91-04e0e4cc70aa" containerName="extract-utilities"
Oct 02 11:07:26 crc kubenswrapper[4783]: I1002 11:07:26.877634 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b4a1225-26ed-41e4-aa91-04e0e4cc70aa" containerName="extract-utilities"
Oct 02 11:07:26 crc kubenswrapper[4783]: E1002 11:07:26.877659 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b4a1225-26ed-41e4-aa91-04e0e4cc70aa" containerName="extract-content"
Oct 02 11:07:26 crc kubenswrapper[4783]: I1002 11:07:26.877667 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b4a1225-26ed-41e4-aa91-04e0e4cc70aa" containerName="extract-content"
Oct 02 11:07:26 crc kubenswrapper[4783]: E1002 11:07:26.877687 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7b4a1225-26ed-41e4-aa91-04e0e4cc70aa" containerName="registry-server"
Oct 02 11:07:26 crc kubenswrapper[4783]: I1002 11:07:26.877695 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="7b4a1225-26ed-41e4-aa91-04e0e4cc70aa" containerName="registry-server"
Oct 02 11:07:26 crc kubenswrapper[4783]: I1002 11:07:26.877820 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="7b4a1225-26ed-41e4-aa91-04e0e4cc70aa" containerName="registry-server"
Oct 02 11:07:26 crc kubenswrapper[4783]: I1002 11:07:26.878754 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26bw7m"
Oct 02 11:07:26 crc kubenswrapper[4783]: I1002 11:07:26.881149 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc"
Oct 02 11:07:26 crc kubenswrapper[4783]: I1002 11:07:26.900688 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26bw7m"]
Oct 02 11:07:26 crc kubenswrapper[4783]: I1002 11:07:26.945843 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6ql4t\" (UniqueName: \"kubernetes.io/projected/6083ba68-92cc-4887-995a-f0aa6a582a48-kube-api-access-6ql4t\") pod \"8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26bw7m\" (UID: \"6083ba68-92cc-4887-995a-f0aa6a582a48\") " pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26bw7m"
Oct 02 11:07:26 crc kubenswrapper[4783]: I1002 11:07:26.946187 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6083ba68-92cc-4887-995a-f0aa6a582a48-bundle\") pod \"8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26bw7m\" (UID: \"6083ba68-92cc-4887-995a-f0aa6a582a48\") " pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26bw7m"
Oct 02 11:07:26 crc kubenswrapper[4783]: I1002 11:07:26.946582 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6083ba68-92cc-4887-995a-f0aa6a582a48-util\") pod \"8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26bw7m\" (UID: \"6083ba68-92cc-4887-995a-f0aa6a582a48\") " pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26bw7m"
Oct 02 11:07:27 crc kubenswrapper[4783]: I1002 11:07:27.047335 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6083ba68-92cc-4887-995a-f0aa6a582a48-bundle\") pod \"8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26bw7m\" (UID: \"6083ba68-92cc-4887-995a-f0aa6a582a48\") " pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26bw7m"
Oct 02 11:07:27 crc kubenswrapper[4783]: I1002 11:07:27.047433 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6083ba68-92cc-4887-995a-f0aa6a582a48-util\") pod \"8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26bw7m\" (UID: \"6083ba68-92cc-4887-995a-f0aa6a582a48\") " pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26bw7m"
Oct 02 11:07:27 crc kubenswrapper[4783]: I1002 11:07:27.047456 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6ql4t\" (UniqueName: \"kubernetes.io/projected/6083ba68-92cc-4887-995a-f0aa6a582a48-kube-api-access-6ql4t\") pod \"8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26bw7m\" (UID: \"6083ba68-92cc-4887-995a-f0aa6a582a48\") " pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26bw7m"
Oct 02 11:07:27 crc kubenswrapper[4783]: I1002 11:07:27.048221 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6083ba68-92cc-4887-995a-f0aa6a582a48-util\") pod \"8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26bw7m\" (UID: \"6083ba68-92cc-4887-995a-f0aa6a582a48\") " pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26bw7m"
Oct 02 11:07:27 crc kubenswrapper[4783]: I1002 11:07:27.048221 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6083ba68-92cc-4887-995a-f0aa6a582a48-bundle\") pod \"8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26bw7m\" (UID: \"6083ba68-92cc-4887-995a-f0aa6a582a48\") " pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26bw7m"
Oct 02 11:07:27 crc kubenswrapper[4783]: I1002 11:07:27.068119 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6ql4t\" (UniqueName: \"kubernetes.io/projected/6083ba68-92cc-4887-995a-f0aa6a582a48-kube-api-access-6ql4t\") pod \"8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26bw7m\" (UID: \"6083ba68-92cc-4887-995a-f0aa6a582a48\") " pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26bw7m"
Oct 02 11:07:27 crc kubenswrapper[4783]: I1002 11:07:27.195159 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26bw7m"
Need to start a new one" pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26bw7m" Oct 02 11:07:27 crc kubenswrapper[4783]: I1002 11:07:27.597084 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26bw7m"] Oct 02 11:07:27 crc kubenswrapper[4783]: W1002 11:07:27.602232 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod6083ba68_92cc_4887_995a_f0aa6a582a48.slice/crio-8970eb774173cc521e6d8fff9f60913a0410467a391ee93c5c7ea56f7dba3be9 WatchSource:0}: Error finding container 8970eb774173cc521e6d8fff9f60913a0410467a391ee93c5c7ea56f7dba3be9: Status 404 returned error can't find the container with id 8970eb774173cc521e6d8fff9f60913a0410467a391ee93c5c7ea56f7dba3be9 Oct 02 11:07:28 crc kubenswrapper[4783]: I1002 11:07:28.411302 4783 generic.go:334] "Generic (PLEG): container finished" podID="6083ba68-92cc-4887-995a-f0aa6a582a48" containerID="658f690523a62908a76b5a8e2bd2eda528a611cbc15f6ff69bcc831e2412cf9f" exitCode=0 Oct 02 11:07:28 crc kubenswrapper[4783]: I1002 11:07:28.411375 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26bw7m" event={"ID":"6083ba68-92cc-4887-995a-f0aa6a582a48","Type":"ContainerDied","Data":"658f690523a62908a76b5a8e2bd2eda528a611cbc15f6ff69bcc831e2412cf9f"} Oct 02 11:07:28 crc kubenswrapper[4783]: I1002 11:07:28.412541 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26bw7m" event={"ID":"6083ba68-92cc-4887-995a-f0aa6a582a48","Type":"ContainerStarted","Data":"8970eb774173cc521e6d8fff9f60913a0410467a391ee93c5c7ea56f7dba3be9"} Oct 02 11:07:29 crc kubenswrapper[4783]: I1002 11:07:29.348676 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-vjcp4" podUID="ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7" containerName="console" containerID="cri-o://d82b0bd3625950f23ddacb09922a3528873a7dfb32c61f5d56e069a8c98ca975" gracePeriod=15 Oct 02 11:07:29 crc kubenswrapper[4783]: I1002 11:07:29.755676 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-vjcp4_ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7/console/0.log" Oct 02 11:07:29 crc kubenswrapper[4783]: I1002 11:07:29.755742 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-vjcp4" Oct 02 11:07:29 crc kubenswrapper[4783]: I1002 11:07:29.885684 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7-console-oauth-config\") pod \"ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7\" (UID: \"ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7\") " Oct 02 11:07:29 crc kubenswrapper[4783]: I1002 11:07:29.885781 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7-oauth-serving-cert\") pod \"ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7\" (UID: \"ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7\") " Oct 02 11:07:29 crc kubenswrapper[4783]: I1002 11:07:29.885887 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7-trusted-ca-bundle\") pod \"ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7\" (UID: \"ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7\") " Oct 02 11:07:29 crc kubenswrapper[4783]: I1002 11:07:29.885922 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7-console-serving-cert\") pod \"ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7\" (UID: \"ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7\") " Oct 02 11:07:29 crc kubenswrapper[4783]: I1002 11:07:29.885969 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7-console-config\") pod \"ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7\" (UID: \"ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7\") " Oct 02 11:07:29 crc kubenswrapper[4783]: I1002 11:07:29.886000 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lp9dj\" (UniqueName: \"kubernetes.io/projected/ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7-kube-api-access-lp9dj\") pod \"ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7\" (UID: \"ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7\") " Oct 02 11:07:29 crc kubenswrapper[4783]: I1002 11:07:29.886115 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7-service-ca\") pod \"ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7\" (UID: \"ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7\") " Oct 02 11:07:29 crc kubenswrapper[4783]: I1002 11:07:29.886888 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7" (UID: "ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:07:29 crc kubenswrapper[4783]: I1002 11:07:29.886919 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7-console-config" (OuterVolumeSpecName: "console-config") pod "ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7" (UID: "ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7"). InnerVolumeSpecName "console-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:07:29 crc kubenswrapper[4783]: I1002 11:07:29.887390 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7-service-ca" (OuterVolumeSpecName: "service-ca") pod "ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7" (UID: "ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:07:29 crc kubenswrapper[4783]: I1002 11:07:29.887683 4783 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 11:07:29 crc kubenswrapper[4783]: I1002 11:07:29.887708 4783 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7-console-config\") on node \"crc\" DevicePath \"\"" Oct 02 11:07:29 crc kubenswrapper[4783]: I1002 11:07:29.887725 4783 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7-service-ca\") on node \"crc\" DevicePath \"\"" Oct 02 11:07:29 crc kubenswrapper[4783]: I1002 11:07:29.888019 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7" (UID: "ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:07:29 crc kubenswrapper[4783]: I1002 11:07:29.892009 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7" (UID: "ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:07:29 crc kubenswrapper[4783]: I1002 11:07:29.892482 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7" (UID: "ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:07:29 crc kubenswrapper[4783]: I1002 11:07:29.892484 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7-kube-api-access-lp9dj" (OuterVolumeSpecName: "kube-api-access-lp9dj") pod "ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7" (UID: "ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7"). InnerVolumeSpecName "kube-api-access-lp9dj". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:07:29 crc kubenswrapper[4783]: I1002 11:07:29.988704 4783 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7-console-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 02 11:07:29 crc kubenswrapper[4783]: I1002 11:07:29.989071 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lp9dj\" (UniqueName: \"kubernetes.io/projected/ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7-kube-api-access-lp9dj\") on node \"crc\" DevicePath \"\"" Oct 02 11:07:29 crc kubenswrapper[4783]: I1002 11:07:29.989085 4783 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7-console-oauth-config\") on node \"crc\" DevicePath \"\"" Oct 02 11:07:29 crc kubenswrapper[4783]: I1002 11:07:29.989096 4783 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Oct 02 11:07:30 crc kubenswrapper[4783]: I1002 11:07:30.427844 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-vjcp4_ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7/console/0.log" Oct 02 11:07:30 crc kubenswrapper[4783]: I1002 11:07:30.427907 4783 generic.go:334] "Generic (PLEG): container finished" podID="ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7" containerID="d82b0bd3625950f23ddacb09922a3528873a7dfb32c61f5d56e069a8c98ca975" exitCode=2 Oct 02 11:07:30 crc kubenswrapper[4783]: I1002 11:07:30.427972 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-vjcp4" event={"ID":"ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7","Type":"ContainerDied","Data":"d82b0bd3625950f23ddacb09922a3528873a7dfb32c61f5d56e069a8c98ca975"} Oct 02 11:07:30 crc kubenswrapper[4783]: I1002 11:07:30.427981 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-vjcp4" Oct 02 11:07:30 crc kubenswrapper[4783]: I1002 11:07:30.427997 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-vjcp4" event={"ID":"ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7","Type":"ContainerDied","Data":"846279bfdecc023d4c0572ac40f93de2279d2e05bea57188bbb07e20b97c9f18"} Oct 02 11:07:30 crc kubenswrapper[4783]: I1002 11:07:30.428012 4783 scope.go:117] "RemoveContainer" containerID="d82b0bd3625950f23ddacb09922a3528873a7dfb32c61f5d56e069a8c98ca975" Oct 02 11:07:30 crc kubenswrapper[4783]: I1002 11:07:30.430458 4783 generic.go:334] "Generic (PLEG): container finished" podID="6083ba68-92cc-4887-995a-f0aa6a582a48" containerID="608cb8cb2544a0a2411fd23220e3e6a265d61d80325713101d8c55062d8f63e9" exitCode=0 Oct 02 11:07:30 crc kubenswrapper[4783]: I1002 11:07:30.430515 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26bw7m" event={"ID":"6083ba68-92cc-4887-995a-f0aa6a582a48","Type":"ContainerDied","Data":"608cb8cb2544a0a2411fd23220e3e6a265d61d80325713101d8c55062d8f63e9"} Oct 02 11:07:30 crc kubenswrapper[4783]: I1002 11:07:30.445959 4783 scope.go:117] "RemoveContainer" containerID="d82b0bd3625950f23ddacb09922a3528873a7dfb32c61f5d56e069a8c98ca975" Oct 02 11:07:30 crc kubenswrapper[4783]: E1002 11:07:30.446471 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d82b0bd3625950f23ddacb09922a3528873a7dfb32c61f5d56e069a8c98ca975\": container with ID starting with d82b0bd3625950f23ddacb09922a3528873a7dfb32c61f5d56e069a8c98ca975 not found: ID does not exist" containerID="d82b0bd3625950f23ddacb09922a3528873a7dfb32c61f5d56e069a8c98ca975" Oct 02 11:07:30 crc kubenswrapper[4783]: I1002 11:07:30.446513 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d82b0bd3625950f23ddacb09922a3528873a7dfb32c61f5d56e069a8c98ca975"} err="failed to get container status \"d82b0bd3625950f23ddacb09922a3528873a7dfb32c61f5d56e069a8c98ca975\": rpc error: code = NotFound desc = could not find container \"d82b0bd3625950f23ddacb09922a3528873a7dfb32c61f5d56e069a8c98ca975\": container with ID starting with d82b0bd3625950f23ddacb09922a3528873a7dfb32c61f5d56e069a8c98ca975 not found: ID does not exist" Oct 02 11:07:30 crc kubenswrapper[4783]: I1002 11:07:30.474380 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-vjcp4"] Oct 02 11:07:30 crc kubenswrapper[4783]: I1002 11:07:30.478520 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-vjcp4"] Oct 02 11:07:31 crc kubenswrapper[4783]: I1002 11:07:31.439633 4783 generic.go:334] "Generic (PLEG): container finished" podID="6083ba68-92cc-4887-995a-f0aa6a582a48" containerID="35e71295e53673b6af570fd57914fe8c9408b473846ae37bfd950aec10600c62" exitCode=0 Oct 02 11:07:31 crc kubenswrapper[4783]: I1002 11:07:31.439744 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26bw7m" event={"ID":"6083ba68-92cc-4887-995a-f0aa6a582a48","Type":"ContainerDied","Data":"35e71295e53673b6af570fd57914fe8c9408b473846ae37bfd950aec10600c62"} Oct 02 11:07:31 crc kubenswrapper[4783]: I1002 11:07:31.555825 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7" 
path="/var/lib/kubelet/pods/ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7/volumes" Oct 02 11:07:31 crc kubenswrapper[4783]: I1002 11:07:31.592038 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-pl44m" Oct 02 11:07:31 crc kubenswrapper[4783]: I1002 11:07:31.592258 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-pl44m" Oct 02 11:07:31 crc kubenswrapper[4783]: I1002 11:07:31.630286 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-pl44m" Oct 02 11:07:32 crc kubenswrapper[4783]: I1002 11:07:32.505719 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-pl44m" Oct 02 11:07:32 crc kubenswrapper[4783]: I1002 11:07:32.656653 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26bw7m" Oct 02 11:07:32 crc kubenswrapper[4783]: I1002 11:07:32.723882 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ql4t\" (UniqueName: \"kubernetes.io/projected/6083ba68-92cc-4887-995a-f0aa6a582a48-kube-api-access-6ql4t\") pod \"6083ba68-92cc-4887-995a-f0aa6a582a48\" (UID: \"6083ba68-92cc-4887-995a-f0aa6a582a48\") " Oct 02 11:07:32 crc kubenswrapper[4783]: I1002 11:07:32.724037 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6083ba68-92cc-4887-995a-f0aa6a582a48-util\") pod \"6083ba68-92cc-4887-995a-f0aa6a582a48\" (UID: \"6083ba68-92cc-4887-995a-f0aa6a582a48\") " Oct 02 11:07:32 crc kubenswrapper[4783]: I1002 11:07:32.724094 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6083ba68-92cc-4887-995a-f0aa6a582a48-bundle\") pod \"6083ba68-92cc-4887-995a-f0aa6a582a48\" (UID: \"6083ba68-92cc-4887-995a-f0aa6a582a48\") " Oct 02 11:07:32 crc kubenswrapper[4783]: I1002 11:07:32.725610 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6083ba68-92cc-4887-995a-f0aa6a582a48-bundle" (OuterVolumeSpecName: "bundle") pod "6083ba68-92cc-4887-995a-f0aa6a582a48" (UID: "6083ba68-92cc-4887-995a-f0aa6a582a48"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:07:32 crc kubenswrapper[4783]: I1002 11:07:32.729228 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6083ba68-92cc-4887-995a-f0aa6a582a48-kube-api-access-6ql4t" (OuterVolumeSpecName: "kube-api-access-6ql4t") pod "6083ba68-92cc-4887-995a-f0aa6a582a48" (UID: "6083ba68-92cc-4887-995a-f0aa6a582a48"). InnerVolumeSpecName "kube-api-access-6ql4t". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:07:32 crc kubenswrapper[4783]: I1002 11:07:32.742796 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6083ba68-92cc-4887-995a-f0aa6a582a48-util" (OuterVolumeSpecName: "util") pod "6083ba68-92cc-4887-995a-f0aa6a582a48" (UID: "6083ba68-92cc-4887-995a-f0aa6a582a48"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:07:32 crc kubenswrapper[4783]: I1002 11:07:32.826215 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ql4t\" (UniqueName: \"kubernetes.io/projected/6083ba68-92cc-4887-995a-f0aa6a582a48-kube-api-access-6ql4t\") on node \"crc\" DevicePath \"\"" Oct 02 11:07:32 crc kubenswrapper[4783]: I1002 11:07:32.826300 4783 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/6083ba68-92cc-4887-995a-f0aa6a582a48-util\") on node \"crc\" DevicePath \"\"" Oct 02 11:07:32 crc kubenswrapper[4783]: I1002 11:07:32.826317 4783 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/6083ba68-92cc-4887-995a-f0aa6a582a48-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 11:07:33 crc kubenswrapper[4783]: I1002 11:07:33.457372 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26bw7m" Oct 02 11:07:33 crc kubenswrapper[4783]: I1002 11:07:33.457370 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26bw7m" event={"ID":"6083ba68-92cc-4887-995a-f0aa6a582a48","Type":"ContainerDied","Data":"8970eb774173cc521e6d8fff9f60913a0410467a391ee93c5c7ea56f7dba3be9"} Oct 02 11:07:33 crc kubenswrapper[4783]: I1002 11:07:33.457482 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8970eb774173cc521e6d8fff9f60913a0410467a391ee93c5c7ea56f7dba3be9" Oct 02 11:07:34 crc kubenswrapper[4783]: I1002 11:07:34.638582 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-pl44m"] Oct 02 11:07:35 crc kubenswrapper[4783]: I1002 11:07:35.466718 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-pl44m" podUID="e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f" containerName="registry-server" containerID="cri-o://66c685ee6d709fb332162168296f1e4c415208060541cb1f61c1591419e04c47" gracePeriod=2 Oct 02 11:07:36 crc kubenswrapper[4783]: I1002 11:07:36.329027 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pl44m" Oct 02 11:07:36 crc kubenswrapper[4783]: I1002 11:07:36.371402 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f-utilities\") pod \"e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f\" (UID: \"e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f\") " Oct 02 11:07:36 crc kubenswrapper[4783]: I1002 11:07:36.371494 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f-catalog-content\") pod \"e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f\" (UID: \"e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f\") " Oct 02 11:07:36 crc kubenswrapper[4783]: I1002 11:07:36.371615 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5dzg6\" (UniqueName: \"kubernetes.io/projected/e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f-kube-api-access-5dzg6\") pod \"e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f\" (UID: \"e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f\") " Oct 02 11:07:36 crc kubenswrapper[4783]: I1002 11:07:36.372356 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f-utilities" (OuterVolumeSpecName: "utilities") pod "e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f" (UID: "e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:07:36 crc kubenswrapper[4783]: I1002 11:07:36.377529 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f-kube-api-access-5dzg6" (OuterVolumeSpecName: "kube-api-access-5dzg6") pod "e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f" (UID: "e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f"). InnerVolumeSpecName "kube-api-access-5dzg6". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:07:36 crc kubenswrapper[4783]: I1002 11:07:36.387939 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f" (UID: "e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:07:36 crc kubenswrapper[4783]: I1002 11:07:36.480840 4783 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f-utilities\") on node \"crc\" DevicePath \"\"" Oct 02 11:07:36 crc kubenswrapper[4783]: I1002 11:07:36.480866 4783 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 02 11:07:36 crc kubenswrapper[4783]: I1002 11:07:36.480876 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5dzg6\" (UniqueName: \"kubernetes.io/projected/e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f-kube-api-access-5dzg6\") on node \"crc\" DevicePath \"\"" Oct 02 11:07:36 crc kubenswrapper[4783]: I1002 11:07:36.483880 4783 generic.go:334] "Generic (PLEG): container finished" podID="e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f" containerID="66c685ee6d709fb332162168296f1e4c415208060541cb1f61c1591419e04c47" exitCode=0 Oct 02 11:07:36 crc kubenswrapper[4783]: I1002 11:07:36.483944 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-pl44m" Oct 02 11:07:36 crc kubenswrapper[4783]: I1002 11:07:36.483925 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pl44m" event={"ID":"e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f","Type":"ContainerDied","Data":"66c685ee6d709fb332162168296f1e4c415208060541cb1f61c1591419e04c47"} Oct 02 11:07:36 crc kubenswrapper[4783]: I1002 11:07:36.484070 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-pl44m" event={"ID":"e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f","Type":"ContainerDied","Data":"3e73c332892aa8ab3f20f16b409abb3b70468a743a0f69fd9f4d76b327b53052"} Oct 02 11:07:36 crc kubenswrapper[4783]: I1002 11:07:36.484095 4783 scope.go:117] "RemoveContainer" containerID="66c685ee6d709fb332162168296f1e4c415208060541cb1f61c1591419e04c47" Oct 02 11:07:36 crc kubenswrapper[4783]: I1002 11:07:36.508631 4783 scope.go:117] "RemoveContainer" containerID="6a31dc78616ff45a6590a3bc3a944c3f0a78dbc9aa07178c3528baeb58fca31d" Oct 02 11:07:36 crc kubenswrapper[4783]: I1002 11:07:36.513599 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-pl44m"] Oct 02 11:07:36 crc kubenswrapper[4783]: I1002 11:07:36.517025 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-pl44m"] Oct 02 11:07:36 crc kubenswrapper[4783]: I1002 11:07:36.529034 4783 scope.go:117] "RemoveContainer" containerID="fc981d6c82f4fbb88dbbff5f3c077334c0c5d925f25db5c84073962afdeb8836" Oct 02 11:07:36 crc kubenswrapper[4783]: I1002 11:07:36.544136 4783 scope.go:117] "RemoveContainer" containerID="66c685ee6d709fb332162168296f1e4c415208060541cb1f61c1591419e04c47" Oct 02 11:07:36 crc kubenswrapper[4783]: E1002 11:07:36.544595 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"66c685ee6d709fb332162168296f1e4c415208060541cb1f61c1591419e04c47\": container with ID starting with 66c685ee6d709fb332162168296f1e4c415208060541cb1f61c1591419e04c47 not found: ID does not exist" containerID="66c685ee6d709fb332162168296f1e4c415208060541cb1f61c1591419e04c47" Oct 02 11:07:36 crc kubenswrapper[4783]: I1002 11:07:36.544626 4783 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"66c685ee6d709fb332162168296f1e4c415208060541cb1f61c1591419e04c47"} err="failed to get container status \"66c685ee6d709fb332162168296f1e4c415208060541cb1f61c1591419e04c47\": rpc error: code = NotFound desc = could not find container \"66c685ee6d709fb332162168296f1e4c415208060541cb1f61c1591419e04c47\": container with ID starting with 66c685ee6d709fb332162168296f1e4c415208060541cb1f61c1591419e04c47 not found: ID does not exist" Oct 02 11:07:36 crc kubenswrapper[4783]: I1002 11:07:36.544651 4783 scope.go:117] "RemoveContainer" containerID="6a31dc78616ff45a6590a3bc3a944c3f0a78dbc9aa07178c3528baeb58fca31d" Oct 02 11:07:36 crc kubenswrapper[4783]: E1002 11:07:36.544900 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6a31dc78616ff45a6590a3bc3a944c3f0a78dbc9aa07178c3528baeb58fca31d\": container with ID starting with 6a31dc78616ff45a6590a3bc3a944c3f0a78dbc9aa07178c3528baeb58fca31d not found: ID does not exist" containerID="6a31dc78616ff45a6590a3bc3a944c3f0a78dbc9aa07178c3528baeb58fca31d" Oct 02 11:07:36 crc kubenswrapper[4783]: I1002 11:07:36.544920 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6a31dc78616ff45a6590a3bc3a944c3f0a78dbc9aa07178c3528baeb58fca31d"} err="failed to get container status \"6a31dc78616ff45a6590a3bc3a944c3f0a78dbc9aa07178c3528baeb58fca31d\": rpc error: code = NotFound desc = could not find container \"6a31dc78616ff45a6590a3bc3a944c3f0a78dbc9aa07178c3528baeb58fca31d\": container with ID starting with 6a31dc78616ff45a6590a3bc3a944c3f0a78dbc9aa07178c3528baeb58fca31d not found: ID does not exist" Oct 02 11:07:36 crc kubenswrapper[4783]: I1002 11:07:36.544934 4783 scope.go:117] "RemoveContainer" containerID="fc981d6c82f4fbb88dbbff5f3c077334c0c5d925f25db5c84073962afdeb8836" Oct 02 11:07:36 crc kubenswrapper[4783]: E1002 11:07:36.545220 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fc981d6c82f4fbb88dbbff5f3c077334c0c5d925f25db5c84073962afdeb8836\": container with ID starting with fc981d6c82f4fbb88dbbff5f3c077334c0c5d925f25db5c84073962afdeb8836 not found: ID does not exist" containerID="fc981d6c82f4fbb88dbbff5f3c077334c0c5d925f25db5c84073962afdeb8836" Oct 02 11:07:36 crc kubenswrapper[4783]: I1002 11:07:36.545270 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fc981d6c82f4fbb88dbbff5f3c077334c0c5d925f25db5c84073962afdeb8836"} err="failed to get container status \"fc981d6c82f4fbb88dbbff5f3c077334c0c5d925f25db5c84073962afdeb8836\": rpc error: code = NotFound desc = could not find container \"fc981d6c82f4fbb88dbbff5f3c077334c0c5d925f25db5c84073962afdeb8836\": container with ID starting with fc981d6c82f4fbb88dbbff5f3c077334c0c5d925f25db5c84073962afdeb8836 not found: ID does not exist" Oct 02 11:07:37 crc kubenswrapper[4783]: I1002 11:07:37.556290 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f" path="/var/lib/kubelet/pods/e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f/volumes" Oct 02 11:07:40 crc kubenswrapper[4783]: I1002 11:07:40.229436 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-c4c4dd5fd-sqv6n"] Oct 02 11:07:40 crc kubenswrapper[4783]: E1002 11:07:40.229836 4783 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="6083ba68-92cc-4887-995a-f0aa6a582a48" containerName="extract" Oct 02 11:07:40 crc kubenswrapper[4783]: I1002 11:07:40.229847 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="6083ba68-92cc-4887-995a-f0aa6a582a48" containerName="extract" Oct 02 11:07:40 crc kubenswrapper[4783]: E1002 11:07:40.229858 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6083ba68-92cc-4887-995a-f0aa6a582a48" containerName="util" Oct 02 11:07:40 crc kubenswrapper[4783]: I1002 11:07:40.229864 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="6083ba68-92cc-4887-995a-f0aa6a582a48" containerName="util" Oct 02 11:07:40 crc kubenswrapper[4783]: E1002 11:07:40.229877 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f" containerName="registry-server" Oct 02 11:07:40 crc kubenswrapper[4783]: I1002 11:07:40.229883 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f" containerName="registry-server" Oct 02 11:07:40 crc kubenswrapper[4783]: E1002 11:07:40.229893 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6083ba68-92cc-4887-995a-f0aa6a582a48" containerName="pull" Oct 02 11:07:40 crc kubenswrapper[4783]: I1002 11:07:40.229898 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="6083ba68-92cc-4887-995a-f0aa6a582a48" containerName="pull" Oct 02 11:07:40 crc kubenswrapper[4783]: E1002 11:07:40.229910 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f" containerName="extract-utilities" Oct 02 11:07:40 crc kubenswrapper[4783]: I1002 11:07:40.229916 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f" containerName="extract-utilities" Oct 02 11:07:40 crc kubenswrapper[4783]: E1002 11:07:40.229926 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f" containerName="extract-content" Oct 02 11:07:40 crc kubenswrapper[4783]: I1002 11:07:40.229932 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f" containerName="extract-content" Oct 02 11:07:40 crc kubenswrapper[4783]: E1002 11:07:40.229939 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7" containerName="console" Oct 02 11:07:40 crc kubenswrapper[4783]: I1002 11:07:40.229944 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7" containerName="console" Oct 02 11:07:40 crc kubenswrapper[4783]: I1002 11:07:40.230028 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="6083ba68-92cc-4887-995a-f0aa6a582a48" containerName="extract" Oct 02 11:07:40 crc kubenswrapper[4783]: I1002 11:07:40.230042 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad8e90b0-3fd9-40f3-8e9f-7163e208a6d7" containerName="console" Oct 02 11:07:40 crc kubenswrapper[4783]: I1002 11:07:40.230050 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="e1c4d3d2-d7a1-46fc-826f-3525f8bc7c3f" containerName="registry-server" Oct 02 11:07:40 crc kubenswrapper[4783]: I1002 11:07:40.230404 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-c4c4dd5fd-sqv6n" Oct 02 11:07:40 crc kubenswrapper[4783]: I1002 11:07:40.245069 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Oct 02 11:07:40 crc kubenswrapper[4783]: I1002 11:07:40.245109 4783 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Oct 02 11:07:40 crc kubenswrapper[4783]: I1002 11:07:40.245147 4783 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Oct 02 11:07:40 crc kubenswrapper[4783]: I1002 11:07:40.245757 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Oct 02 11:07:40 crc kubenswrapper[4783]: I1002 11:07:40.247375 4783 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-wzvdn" Oct 02 11:07:40 crc kubenswrapper[4783]: I1002 11:07:40.266054 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-c4c4dd5fd-sqv6n"] Oct 02 11:07:40 crc kubenswrapper[4783]: I1002 11:07:40.325873 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cgd8v\" (UniqueName: \"kubernetes.io/projected/624d75e0-0672-4797-8791-25096bfbf553-kube-api-access-cgd8v\") pod \"metallb-operator-controller-manager-c4c4dd5fd-sqv6n\" (UID: \"624d75e0-0672-4797-8791-25096bfbf553\") " pod="metallb-system/metallb-operator-controller-manager-c4c4dd5fd-sqv6n" Oct 02 11:07:40 crc kubenswrapper[4783]: I1002 11:07:40.325920 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/624d75e0-0672-4797-8791-25096bfbf553-apiservice-cert\") pod \"metallb-operator-controller-manager-c4c4dd5fd-sqv6n\" (UID: \"624d75e0-0672-4797-8791-25096bfbf553\") " pod="metallb-system/metallb-operator-controller-manager-c4c4dd5fd-sqv6n" Oct 02 11:07:40 crc kubenswrapper[4783]: I1002 11:07:40.326093 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/624d75e0-0672-4797-8791-25096bfbf553-webhook-cert\") pod \"metallb-operator-controller-manager-c4c4dd5fd-sqv6n\" (UID: \"624d75e0-0672-4797-8791-25096bfbf553\") " pod="metallb-system/metallb-operator-controller-manager-c4c4dd5fd-sqv6n" Oct 02 11:07:40 crc kubenswrapper[4783]: I1002 11:07:40.427919 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/624d75e0-0672-4797-8791-25096bfbf553-webhook-cert\") pod \"metallb-operator-controller-manager-c4c4dd5fd-sqv6n\" (UID: \"624d75e0-0672-4797-8791-25096bfbf553\") " pod="metallb-system/metallb-operator-controller-manager-c4c4dd5fd-sqv6n" Oct 02 11:07:40 crc kubenswrapper[4783]: I1002 11:07:40.428873 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cgd8v\" (UniqueName: \"kubernetes.io/projected/624d75e0-0672-4797-8791-25096bfbf553-kube-api-access-cgd8v\") pod \"metallb-operator-controller-manager-c4c4dd5fd-sqv6n\" (UID: \"624d75e0-0672-4797-8791-25096bfbf553\") " pod="metallb-system/metallb-operator-controller-manager-c4c4dd5fd-sqv6n" Oct 02 11:07:40 crc kubenswrapper[4783]: I1002 11:07:40.428913 4783 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/624d75e0-0672-4797-8791-25096bfbf553-apiservice-cert\") pod \"metallb-operator-controller-manager-c4c4dd5fd-sqv6n\" (UID: \"624d75e0-0672-4797-8791-25096bfbf553\") " pod="metallb-system/metallb-operator-controller-manager-c4c4dd5fd-sqv6n" Oct 02 11:07:40 crc kubenswrapper[4783]: I1002 11:07:40.432822 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/624d75e0-0672-4797-8791-25096bfbf553-webhook-cert\") pod \"metallb-operator-controller-manager-c4c4dd5fd-sqv6n\" (UID: \"624d75e0-0672-4797-8791-25096bfbf553\") " pod="metallb-system/metallb-operator-controller-manager-c4c4dd5fd-sqv6n" Oct 02 11:07:40 crc kubenswrapper[4783]: I1002 11:07:40.433514 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/624d75e0-0672-4797-8791-25096bfbf553-apiservice-cert\") pod \"metallb-operator-controller-manager-c4c4dd5fd-sqv6n\" (UID: \"624d75e0-0672-4797-8791-25096bfbf553\") " pod="metallb-system/metallb-operator-controller-manager-c4c4dd5fd-sqv6n" Oct 02 11:07:40 crc kubenswrapper[4783]: I1002 11:07:40.453717 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cgd8v\" (UniqueName: \"kubernetes.io/projected/624d75e0-0672-4797-8791-25096bfbf553-kube-api-access-cgd8v\") pod \"metallb-operator-controller-manager-c4c4dd5fd-sqv6n\" (UID: \"624d75e0-0672-4797-8791-25096bfbf553\") " pod="metallb-system/metallb-operator-controller-manager-c4c4dd5fd-sqv6n" Oct 02 11:07:40 crc kubenswrapper[4783]: I1002 11:07:40.538645 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-9d574b6b6-j97tc"] Oct 02 11:07:40 crc kubenswrapper[4783]: I1002 11:07:40.539572 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-9d574b6b6-j97tc" Oct 02 11:07:40 crc kubenswrapper[4783]: I1002 11:07:40.544289 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-c4c4dd5fd-sqv6n" Oct 02 11:07:40 crc kubenswrapper[4783]: I1002 11:07:40.551170 4783 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Oct 02 11:07:40 crc kubenswrapper[4783]: I1002 11:07:40.552492 4783 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-b4qnt" Oct 02 11:07:40 crc kubenswrapper[4783]: I1002 11:07:40.554529 4783 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Oct 02 11:07:40 crc kubenswrapper[4783]: I1002 11:07:40.619727 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-9d574b6b6-j97tc"] Oct 02 11:07:40 crc kubenswrapper[4783]: I1002 11:07:40.631451 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/fdb3e990-3206-4ed2-8df2-86695dadf11f-apiservice-cert\") pod \"metallb-operator-webhook-server-9d574b6b6-j97tc\" (UID: \"fdb3e990-3206-4ed2-8df2-86695dadf11f\") " pod="metallb-system/metallb-operator-webhook-server-9d574b6b6-j97tc" Oct 02 11:07:40 crc kubenswrapper[4783]: I1002 11:07:40.631505 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/fdb3e990-3206-4ed2-8df2-86695dadf11f-webhook-cert\") pod \"metallb-operator-webhook-server-9d574b6b6-j97tc\" (UID: \"fdb3e990-3206-4ed2-8df2-86695dadf11f\") " pod="metallb-system/metallb-operator-webhook-server-9d574b6b6-j97tc" Oct 02 11:07:40 crc kubenswrapper[4783]: I1002 11:07:40.631570 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2nx74\" (UniqueName: \"kubernetes.io/projected/fdb3e990-3206-4ed2-8df2-86695dadf11f-kube-api-access-2nx74\") pod \"metallb-operator-webhook-server-9d574b6b6-j97tc\" (UID: \"fdb3e990-3206-4ed2-8df2-86695dadf11f\") " pod="metallb-system/metallb-operator-webhook-server-9d574b6b6-j97tc" Oct 02 11:07:40 crc kubenswrapper[4783]: I1002 11:07:40.733165 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2nx74\" (UniqueName: \"kubernetes.io/projected/fdb3e990-3206-4ed2-8df2-86695dadf11f-kube-api-access-2nx74\") pod \"metallb-operator-webhook-server-9d574b6b6-j97tc\" (UID: \"fdb3e990-3206-4ed2-8df2-86695dadf11f\") " pod="metallb-system/metallb-operator-webhook-server-9d574b6b6-j97tc" Oct 02 11:07:40 crc kubenswrapper[4783]: I1002 11:07:40.733219 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/fdb3e990-3206-4ed2-8df2-86695dadf11f-apiservice-cert\") pod \"metallb-operator-webhook-server-9d574b6b6-j97tc\" (UID: \"fdb3e990-3206-4ed2-8df2-86695dadf11f\") " pod="metallb-system/metallb-operator-webhook-server-9d574b6b6-j97tc" Oct 02 11:07:40 crc kubenswrapper[4783]: I1002 11:07:40.733249 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/fdb3e990-3206-4ed2-8df2-86695dadf11f-webhook-cert\") pod \"metallb-operator-webhook-server-9d574b6b6-j97tc\" (UID: \"fdb3e990-3206-4ed2-8df2-86695dadf11f\") " pod="metallb-system/metallb-operator-webhook-server-9d574b6b6-j97tc" Oct 02 11:07:40 crc kubenswrapper[4783]: I1002 11:07:40.737543 4783 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/fdb3e990-3206-4ed2-8df2-86695dadf11f-apiservice-cert\") pod \"metallb-operator-webhook-server-9d574b6b6-j97tc\" (UID: \"fdb3e990-3206-4ed2-8df2-86695dadf11f\") " pod="metallb-system/metallb-operator-webhook-server-9d574b6b6-j97tc" Oct 02 11:07:40 crc kubenswrapper[4783]: I1002 11:07:40.745210 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/fdb3e990-3206-4ed2-8df2-86695dadf11f-webhook-cert\") pod \"metallb-operator-webhook-server-9d574b6b6-j97tc\" (UID: \"fdb3e990-3206-4ed2-8df2-86695dadf11f\") " pod="metallb-system/metallb-operator-webhook-server-9d574b6b6-j97tc" Oct 02 11:07:40 crc kubenswrapper[4783]: I1002 11:07:40.765079 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2nx74\" (UniqueName: \"kubernetes.io/projected/fdb3e990-3206-4ed2-8df2-86695dadf11f-kube-api-access-2nx74\") pod \"metallb-operator-webhook-server-9d574b6b6-j97tc\" (UID: \"fdb3e990-3206-4ed2-8df2-86695dadf11f\") " pod="metallb-system/metallb-operator-webhook-server-9d574b6b6-j97tc" Oct 02 11:07:40 crc kubenswrapper[4783]: I1002 11:07:40.843612 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-c4c4dd5fd-sqv6n"] Oct 02 11:07:40 crc kubenswrapper[4783]: I1002 11:07:40.863886 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-9d574b6b6-j97tc" Oct 02 11:07:41 crc kubenswrapper[4783]: I1002 11:07:41.121466 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-9d574b6b6-j97tc"] Oct 02 11:07:41 crc kubenswrapper[4783]: W1002 11:07:41.124526 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfdb3e990_3206_4ed2_8df2_86695dadf11f.slice/crio-7492da20e4fbfb1c2f836d25a5349a6b2a87c06d961a892a7a712e87aff12eb5 WatchSource:0}: Error finding container 7492da20e4fbfb1c2f836d25a5349a6b2a87c06d961a892a7a712e87aff12eb5: Status 404 returned error can't find the container with id 7492da20e4fbfb1c2f836d25a5349a6b2a87c06d961a892a7a712e87aff12eb5 Oct 02 11:07:41 crc kubenswrapper[4783]: I1002 11:07:41.510328 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-9d574b6b6-j97tc" event={"ID":"fdb3e990-3206-4ed2-8df2-86695dadf11f","Type":"ContainerStarted","Data":"7492da20e4fbfb1c2f836d25a5349a6b2a87c06d961a892a7a712e87aff12eb5"} Oct 02 11:07:41 crc kubenswrapper[4783]: I1002 11:07:41.511210 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-c4c4dd5fd-sqv6n" event={"ID":"624d75e0-0672-4797-8791-25096bfbf553","Type":"ContainerStarted","Data":"a75a52a0183b3d39ccb7a8c29bdd56b5804d77e689d38893b099f4f3e112d1f2"} Oct 02 11:07:46 crc kubenswrapper[4783]: I1002 11:07:46.542860 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-c4c4dd5fd-sqv6n" event={"ID":"624d75e0-0672-4797-8791-25096bfbf553","Type":"ContainerStarted","Data":"adc8ee8297ca0db78d9820d18301518b37fdb7279f88a1b51d2205fa7f92e05b"} Oct 02 11:07:46 crc kubenswrapper[4783]: I1002 11:07:46.543389 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="metallb-system/metallb-operator-controller-manager-c4c4dd5fd-sqv6n" Oct 02 11:07:46 crc kubenswrapper[4783]: I1002 11:07:46.544037 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-9d574b6b6-j97tc" event={"ID":"fdb3e990-3206-4ed2-8df2-86695dadf11f","Type":"ContainerStarted","Data":"a408d169f337a8be81cfa0ba8b0fdea07f3ac320c6d9d2aac8db2fbcd1e3c6ed"} Oct 02 11:07:46 crc kubenswrapper[4783]: I1002 11:07:46.544403 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-9d574b6b6-j97tc" Oct 02 11:07:46 crc kubenswrapper[4783]: I1002 11:07:46.597177 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-c4c4dd5fd-sqv6n" podStartSLOduration=1.655667945 podStartE2EDuration="6.597162006s" podCreationTimestamp="2025-10-02 11:07:40 +0000 UTC" firstStartedPulling="2025-10-02 11:07:40.85085295 +0000 UTC m=+894.167047211" lastFinishedPulling="2025-10-02 11:07:45.792347011 +0000 UTC m=+899.108541272" observedRunningTime="2025-10-02 11:07:46.568522717 +0000 UTC m=+899.884716998" watchObservedRunningTime="2025-10-02 11:07:46.597162006 +0000 UTC m=+899.913356267" Oct 02 11:07:46 crc kubenswrapper[4783]: I1002 11:07:46.599476 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-9d574b6b6-j97tc" podStartSLOduration=1.915814334 podStartE2EDuration="6.59946869s" podCreationTimestamp="2025-10-02 11:07:40 +0000 UTC" firstStartedPulling="2025-10-02 11:07:41.127511563 +0000 UTC m=+894.443705824" lastFinishedPulling="2025-10-02 11:07:45.811165919 +0000 UTC m=+899.127360180" observedRunningTime="2025-10-02 11:07:46.590202754 +0000 UTC m=+899.906397016" watchObservedRunningTime="2025-10-02 11:07:46.59946869 +0000 UTC m=+899.915662951" Oct 02 11:07:51 crc kubenswrapper[4783]: I1002 11:07:51.513456 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 02 11:07:51 crc kubenswrapper[4783]: I1002 11:07:51.513811 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 02 11:08:00 crc kubenswrapper[4783]: I1002 11:08:00.868108 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-9d574b6b6-j97tc" Oct 02 11:08:20 crc kubenswrapper[4783]: I1002 11:08:20.546671 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-c4c4dd5fd-sqv6n" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.408349 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-lp8kh"] Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.411399 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-lp8kh" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.414711 4783 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.414723 4783 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-5w6pq" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.416638 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.432917 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-64bf5d555-g9k65"] Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.434091 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-64bf5d555-g9k65" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.442180 4783 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.450364 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-64bf5d555-g9k65"] Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.513222 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.513281 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.526948 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-x5j7x"] Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.528073 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/speaker-x5j7x" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.531635 4783 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.531660 4783 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.531699 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.532763 4783 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-qwvrc" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.545756 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/d9b837d5-d442-41aa-b6be-2fea310d330c-metrics\") pod \"frr-k8s-lp8kh\" (UID: \"d9b837d5-d442-41aa-b6be-2fea310d330c\") " pod="metallb-system/frr-k8s-lp8kh" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.545796 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hh67g\" (UniqueName: \"kubernetes.io/projected/d9b837d5-d442-41aa-b6be-2fea310d330c-kube-api-access-hh67g\") pod \"frr-k8s-lp8kh\" (UID: \"d9b837d5-d442-41aa-b6be-2fea310d330c\") " pod="metallb-system/frr-k8s-lp8kh" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.545820 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/d9b837d5-d442-41aa-b6be-2fea310d330c-frr-startup\") pod \"frr-k8s-lp8kh\" (UID: \"d9b837d5-d442-41aa-b6be-2fea310d330c\") " pod="metallb-system/frr-k8s-lp8kh" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.545847 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/d9b837d5-d442-41aa-b6be-2fea310d330c-frr-sockets\") pod \"frr-k8s-lp8kh\" (UID: \"d9b837d5-d442-41aa-b6be-2fea310d330c\") " pod="metallb-system/frr-k8s-lp8kh" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.545873 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/d9b837d5-d442-41aa-b6be-2fea310d330c-reloader\") pod \"frr-k8s-lp8kh\" (UID: \"d9b837d5-d442-41aa-b6be-2fea310d330c\") " pod="metallb-system/frr-k8s-lp8kh" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.545917 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-64xnc\" (UniqueName: \"kubernetes.io/projected/6d68c40b-106d-478a-ad63-90dc1bbaf434-kube-api-access-64xnc\") pod \"frr-k8s-webhook-server-64bf5d555-g9k65\" (UID: \"6d68c40b-106d-478a-ad63-90dc1bbaf434\") " pod="metallb-system/frr-k8s-webhook-server-64bf5d555-g9k65" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.545955 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/d9b837d5-d442-41aa-b6be-2fea310d330c-metrics-certs\") pod \"frr-k8s-lp8kh\" (UID: \"d9b837d5-d442-41aa-b6be-2fea310d330c\") " pod="metallb-system/frr-k8s-lp8kh" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.545994 4783 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/d9b837d5-d442-41aa-b6be-2fea310d330c-frr-conf\") pod \"frr-k8s-lp8kh\" (UID: \"d9b837d5-d442-41aa-b6be-2fea310d330c\") " pod="metallb-system/frr-k8s-lp8kh" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.546019 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6d68c40b-106d-478a-ad63-90dc1bbaf434-cert\") pod \"frr-k8s-webhook-server-64bf5d555-g9k65\" (UID: \"6d68c40b-106d-478a-ad63-90dc1bbaf434\") " pod="metallb-system/frr-k8s-webhook-server-64bf5d555-g9k65" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.555975 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-68d546b9d8-d47hb"] Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.557118 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/controller-68d546b9d8-d47hb" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.559038 4783 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.565545 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-68d546b9d8-d47hb"] Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.647391 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6d68c40b-106d-478a-ad63-90dc1bbaf434-cert\") pod \"frr-k8s-webhook-server-64bf5d555-g9k65\" (UID: \"6d68c40b-106d-478a-ad63-90dc1bbaf434\") " pod="metallb-system/frr-k8s-webhook-server-64bf5d555-g9k65" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.647454 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/d9b837d5-d442-41aa-b6be-2fea310d330c-metrics\") pod \"frr-k8s-lp8kh\" (UID: \"d9b837d5-d442-41aa-b6be-2fea310d330c\") " pod="metallb-system/frr-k8s-lp8kh" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.647477 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hh67g\" (UniqueName: \"kubernetes.io/projected/d9b837d5-d442-41aa-b6be-2fea310d330c-kube-api-access-hh67g\") pod \"frr-k8s-lp8kh\" (UID: \"d9b837d5-d442-41aa-b6be-2fea310d330c\") " pod="metallb-system/frr-k8s-lp8kh" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.647497 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/d9b837d5-d442-41aa-b6be-2fea310d330c-frr-startup\") pod \"frr-k8s-lp8kh\" (UID: \"d9b837d5-d442-41aa-b6be-2fea310d330c\") " pod="metallb-system/frr-k8s-lp8kh" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.647523 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bs87j\" (UniqueName: \"kubernetes.io/projected/c3dfee2e-30ff-42e9-b095-6315d8ab67d8-kube-api-access-bs87j\") pod \"controller-68d546b9d8-d47hb\" (UID: \"c3dfee2e-30ff-42e9-b095-6315d8ab67d8\") " pod="metallb-system/controller-68d546b9d8-d47hb" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.647546 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: 
\"kubernetes.io/configmap/8e0102e1-45d3-4a6f-a080-55c51c71d864-metallb-excludel2\") pod \"speaker-x5j7x\" (UID: \"8e0102e1-45d3-4a6f-a080-55c51c71d864\") " pod="metallb-system/speaker-x5j7x" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.647578 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/d9b837d5-d442-41aa-b6be-2fea310d330c-frr-sockets\") pod \"frr-k8s-lp8kh\" (UID: \"d9b837d5-d442-41aa-b6be-2fea310d330c\") " pod="metallb-system/frr-k8s-lp8kh" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.647622 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/d9b837d5-d442-41aa-b6be-2fea310d330c-reloader\") pod \"frr-k8s-lp8kh\" (UID: \"d9b837d5-d442-41aa-b6be-2fea310d330c\") " pod="metallb-system/frr-k8s-lp8kh" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.647658 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c3dfee2e-30ff-42e9-b095-6315d8ab67d8-metrics-certs\") pod \"controller-68d546b9d8-d47hb\" (UID: \"c3dfee2e-30ff-42e9-b095-6315d8ab67d8\") " pod="metallb-system/controller-68d546b9d8-d47hb" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.647685 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8e0102e1-45d3-4a6f-a080-55c51c71d864-metrics-certs\") pod \"speaker-x5j7x\" (UID: \"8e0102e1-45d3-4a6f-a080-55c51c71d864\") " pod="metallb-system/speaker-x5j7x" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.647728 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-64xnc\" (UniqueName: \"kubernetes.io/projected/6d68c40b-106d-478a-ad63-90dc1bbaf434-kube-api-access-64xnc\") pod \"frr-k8s-webhook-server-64bf5d555-g9k65\" (UID: \"6d68c40b-106d-478a-ad63-90dc1bbaf434\") " pod="metallb-system/frr-k8s-webhook-server-64bf5d555-g9k65" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.647752 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mj5hd\" (UniqueName: \"kubernetes.io/projected/8e0102e1-45d3-4a6f-a080-55c51c71d864-kube-api-access-mj5hd\") pod \"speaker-x5j7x\" (UID: \"8e0102e1-45d3-4a6f-a080-55c51c71d864\") " pod="metallb-system/speaker-x5j7x" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.647821 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/d9b837d5-d442-41aa-b6be-2fea310d330c-metrics-certs\") pod \"frr-k8s-lp8kh\" (UID: \"d9b837d5-d442-41aa-b6be-2fea310d330c\") " pod="metallb-system/frr-k8s-lp8kh" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.647887 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c3dfee2e-30ff-42e9-b095-6315d8ab67d8-cert\") pod \"controller-68d546b9d8-d47hb\" (UID: \"c3dfee2e-30ff-42e9-b095-6315d8ab67d8\") " pod="metallb-system/controller-68d546b9d8-d47hb" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.647909 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/8e0102e1-45d3-4a6f-a080-55c51c71d864-memberlist\") pod \"speaker-x5j7x\" (UID: 
\"8e0102e1-45d3-4a6f-a080-55c51c71d864\") " pod="metallb-system/speaker-x5j7x" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.647936 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/d9b837d5-d442-41aa-b6be-2fea310d330c-frr-conf\") pod \"frr-k8s-lp8kh\" (UID: \"d9b837d5-d442-41aa-b6be-2fea310d330c\") " pod="metallb-system/frr-k8s-lp8kh" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.648360 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/d9b837d5-d442-41aa-b6be-2fea310d330c-frr-conf\") pod \"frr-k8s-lp8kh\" (UID: \"d9b837d5-d442-41aa-b6be-2fea310d330c\") " pod="metallb-system/frr-k8s-lp8kh" Oct 02 11:08:21 crc kubenswrapper[4783]: E1002 11:08:21.649119 4783 secret.go:188] Couldn't get secret metallb-system/frr-k8s-webhook-server-cert: secret "frr-k8s-webhook-server-cert" not found Oct 02 11:08:21 crc kubenswrapper[4783]: E1002 11:08:21.649174 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6d68c40b-106d-478a-ad63-90dc1bbaf434-cert podName:6d68c40b-106d-478a-ad63-90dc1bbaf434 nodeName:}" failed. No retries permitted until 2025-10-02 11:08:22.149155669 +0000 UTC m=+935.465349930 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/6d68c40b-106d-478a-ad63-90dc1bbaf434-cert") pod "frr-k8s-webhook-server-64bf5d555-g9k65" (UID: "6d68c40b-106d-478a-ad63-90dc1bbaf434") : secret "frr-k8s-webhook-server-cert" not found Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.649395 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/d9b837d5-d442-41aa-b6be-2fea310d330c-metrics\") pod \"frr-k8s-lp8kh\" (UID: \"d9b837d5-d442-41aa-b6be-2fea310d330c\") " pod="metallb-system/frr-k8s-lp8kh" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.650305 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/d9b837d5-d442-41aa-b6be-2fea310d330c-frr-sockets\") pod \"frr-k8s-lp8kh\" (UID: \"d9b837d5-d442-41aa-b6be-2fea310d330c\") " pod="metallb-system/frr-k8s-lp8kh" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.650514 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/d9b837d5-d442-41aa-b6be-2fea310d330c-frr-startup\") pod \"frr-k8s-lp8kh\" (UID: \"d9b837d5-d442-41aa-b6be-2fea310d330c\") " pod="metallb-system/frr-k8s-lp8kh" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.650863 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/d9b837d5-d442-41aa-b6be-2fea310d330c-reloader\") pod \"frr-k8s-lp8kh\" (UID: \"d9b837d5-d442-41aa-b6be-2fea310d330c\") " pod="metallb-system/frr-k8s-lp8kh" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.671437 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/d9b837d5-d442-41aa-b6be-2fea310d330c-metrics-certs\") pod \"frr-k8s-lp8kh\" (UID: \"d9b837d5-d442-41aa-b6be-2fea310d330c\") " pod="metallb-system/frr-k8s-lp8kh" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.698213 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hh67g\" (UniqueName: 
\"kubernetes.io/projected/d9b837d5-d442-41aa-b6be-2fea310d330c-kube-api-access-hh67g\") pod \"frr-k8s-lp8kh\" (UID: \"d9b837d5-d442-41aa-b6be-2fea310d330c\") " pod="metallb-system/frr-k8s-lp8kh" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.699096 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-64xnc\" (UniqueName: \"kubernetes.io/projected/6d68c40b-106d-478a-ad63-90dc1bbaf434-kube-api-access-64xnc\") pod \"frr-k8s-webhook-server-64bf5d555-g9k65\" (UID: \"6d68c40b-106d-478a-ad63-90dc1bbaf434\") " pod="metallb-system/frr-k8s-webhook-server-64bf5d555-g9k65" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.730982 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-lp8kh" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.749256 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8e0102e1-45d3-4a6f-a080-55c51c71d864-metrics-certs\") pod \"speaker-x5j7x\" (UID: \"8e0102e1-45d3-4a6f-a080-55c51c71d864\") " pod="metallb-system/speaker-x5j7x" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.749312 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mj5hd\" (UniqueName: \"kubernetes.io/projected/8e0102e1-45d3-4a6f-a080-55c51c71d864-kube-api-access-mj5hd\") pod \"speaker-x5j7x\" (UID: \"8e0102e1-45d3-4a6f-a080-55c51c71d864\") " pod="metallb-system/speaker-x5j7x" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.749353 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c3dfee2e-30ff-42e9-b095-6315d8ab67d8-cert\") pod \"controller-68d546b9d8-d47hb\" (UID: \"c3dfee2e-30ff-42e9-b095-6315d8ab67d8\") " pod="metallb-system/controller-68d546b9d8-d47hb" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.749369 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/8e0102e1-45d3-4a6f-a080-55c51c71d864-memberlist\") pod \"speaker-x5j7x\" (UID: \"8e0102e1-45d3-4a6f-a080-55c51c71d864\") " pod="metallb-system/speaker-x5j7x" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.749433 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bs87j\" (UniqueName: \"kubernetes.io/projected/c3dfee2e-30ff-42e9-b095-6315d8ab67d8-kube-api-access-bs87j\") pod \"controller-68d546b9d8-d47hb\" (UID: \"c3dfee2e-30ff-42e9-b095-6315d8ab67d8\") " pod="metallb-system/controller-68d546b9d8-d47hb" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.749450 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/8e0102e1-45d3-4a6f-a080-55c51c71d864-metallb-excludel2\") pod \"speaker-x5j7x\" (UID: \"8e0102e1-45d3-4a6f-a080-55c51c71d864\") " pod="metallb-system/speaker-x5j7x" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.749471 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c3dfee2e-30ff-42e9-b095-6315d8ab67d8-metrics-certs\") pod \"controller-68d546b9d8-d47hb\" (UID: \"c3dfee2e-30ff-42e9-b095-6315d8ab67d8\") " pod="metallb-system/controller-68d546b9d8-d47hb" Oct 02 11:08:21 crc kubenswrapper[4783]: E1002 11:08:21.749689 4783 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: 
secret "metallb-memberlist" not found Oct 02 11:08:21 crc kubenswrapper[4783]: E1002 11:08:21.749767 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8e0102e1-45d3-4a6f-a080-55c51c71d864-memberlist podName:8e0102e1-45d3-4a6f-a080-55c51c71d864 nodeName:}" failed. No retries permitted until 2025-10-02 11:08:22.249745651 +0000 UTC m=+935.565939902 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/8e0102e1-45d3-4a6f-a080-55c51c71d864-memberlist") pod "speaker-x5j7x" (UID: "8e0102e1-45d3-4a6f-a080-55c51c71d864") : secret "metallb-memberlist" not found Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.750724 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/8e0102e1-45d3-4a6f-a080-55c51c71d864-metallb-excludel2\") pod \"speaker-x5j7x\" (UID: \"8e0102e1-45d3-4a6f-a080-55c51c71d864\") " pod="metallb-system/speaker-x5j7x" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.755954 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c3dfee2e-30ff-42e9-b095-6315d8ab67d8-metrics-certs\") pod \"controller-68d546b9d8-d47hb\" (UID: \"c3dfee2e-30ff-42e9-b095-6315d8ab67d8\") " pod="metallb-system/controller-68d546b9d8-d47hb" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.764840 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/8e0102e1-45d3-4a6f-a080-55c51c71d864-metrics-certs\") pod \"speaker-x5j7x\" (UID: \"8e0102e1-45d3-4a6f-a080-55c51c71d864\") " pod="metallb-system/speaker-x5j7x" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.765070 4783 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.771618 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bs87j\" (UniqueName: \"kubernetes.io/projected/c3dfee2e-30ff-42e9-b095-6315d8ab67d8-kube-api-access-bs87j\") pod \"controller-68d546b9d8-d47hb\" (UID: \"c3dfee2e-30ff-42e9-b095-6315d8ab67d8\") " pod="metallb-system/controller-68d546b9d8-d47hb" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.774894 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/c3dfee2e-30ff-42e9-b095-6315d8ab67d8-cert\") pod \"controller-68d546b9d8-d47hb\" (UID: \"c3dfee2e-30ff-42e9-b095-6315d8ab67d8\") " pod="metallb-system/controller-68d546b9d8-d47hb" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.786989 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mj5hd\" (UniqueName: \"kubernetes.io/projected/8e0102e1-45d3-4a6f-a080-55c51c71d864-kube-api-access-mj5hd\") pod \"speaker-x5j7x\" (UID: \"8e0102e1-45d3-4a6f-a080-55c51c71d864\") " pod="metallb-system/speaker-x5j7x" Oct 02 11:08:21 crc kubenswrapper[4783]: I1002 11:08:21.874855 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-68d546b9d8-d47hb" Oct 02 11:08:22 crc kubenswrapper[4783]: I1002 11:08:22.153722 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6d68c40b-106d-478a-ad63-90dc1bbaf434-cert\") pod \"frr-k8s-webhook-server-64bf5d555-g9k65\" (UID: \"6d68c40b-106d-478a-ad63-90dc1bbaf434\") " pod="metallb-system/frr-k8s-webhook-server-64bf5d555-g9k65" Oct 02 11:08:22 crc kubenswrapper[4783]: I1002 11:08:22.158588 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6d68c40b-106d-478a-ad63-90dc1bbaf434-cert\") pod \"frr-k8s-webhook-server-64bf5d555-g9k65\" (UID: \"6d68c40b-106d-478a-ad63-90dc1bbaf434\") " pod="metallb-system/frr-k8s-webhook-server-64bf5d555-g9k65" Oct 02 11:08:22 crc kubenswrapper[4783]: I1002 11:08:22.255336 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/8e0102e1-45d3-4a6f-a080-55c51c71d864-memberlist\") pod \"speaker-x5j7x\" (UID: \"8e0102e1-45d3-4a6f-a080-55c51c71d864\") " pod="metallb-system/speaker-x5j7x" Oct 02 11:08:22 crc kubenswrapper[4783]: E1002 11:08:22.255483 4783 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Oct 02 11:08:22 crc kubenswrapper[4783]: E1002 11:08:22.255559 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8e0102e1-45d3-4a6f-a080-55c51c71d864-memberlist podName:8e0102e1-45d3-4a6f-a080-55c51c71d864 nodeName:}" failed. No retries permitted until 2025-10-02 11:08:23.255541187 +0000 UTC m=+936.571735448 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/8e0102e1-45d3-4a6f-a080-55c51c71d864-memberlist") pod "speaker-x5j7x" (UID: "8e0102e1-45d3-4a6f-a080-55c51c71d864") : secret "metallb-memberlist" not found Oct 02 11:08:22 crc kubenswrapper[4783]: I1002 11:08:22.281191 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-68d546b9d8-d47hb"] Oct 02 11:08:22 crc kubenswrapper[4783]: W1002 11:08:22.287025 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc3dfee2e_30ff_42e9_b095_6315d8ab67d8.slice/crio-e4470a463adff15c88e7622aecd5519ad7a56858be79d8494f4d64242156f8b7 WatchSource:0}: Error finding container e4470a463adff15c88e7622aecd5519ad7a56858be79d8494f4d64242156f8b7: Status 404 returned error can't find the container with id e4470a463adff15c88e7622aecd5519ad7a56858be79d8494f4d64242156f8b7 Oct 02 11:08:22 crc kubenswrapper[4783]: I1002 11:08:22.349605 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-64bf5d555-g9k65" Oct 02 11:08:22 crc kubenswrapper[4783]: I1002 11:08:22.546731 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-64bf5d555-g9k65"] Oct 02 11:08:22 crc kubenswrapper[4783]: W1002 11:08:22.549823 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6d68c40b_106d_478a_ad63_90dc1bbaf434.slice/crio-20dc81b316bbf89f1934bcf3aa128f2d2d62f824d11135acdcc8defb2f2240c3 WatchSource:0}: Error finding container 20dc81b316bbf89f1934bcf3aa128f2d2d62f824d11135acdcc8defb2f2240c3: Status 404 returned error can't find the container with id 20dc81b316bbf89f1934bcf3aa128f2d2d62f824d11135acdcc8defb2f2240c3 Oct 02 11:08:22 crc kubenswrapper[4783]: I1002 11:08:22.751489 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-68d546b9d8-d47hb" event={"ID":"c3dfee2e-30ff-42e9-b095-6315d8ab67d8","Type":"ContainerStarted","Data":"a7920de5cfa813a6cbc5c30973b716918f74728a491eb764cb4d1d7c78ce9da3"} Oct 02 11:08:22 crc kubenswrapper[4783]: I1002 11:08:22.751537 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-68d546b9d8-d47hb" event={"ID":"c3dfee2e-30ff-42e9-b095-6315d8ab67d8","Type":"ContainerStarted","Data":"16d1209ee3fc51ca19f715a7452718588f3b8ad4b628ec772c101bb1ac453a4a"} Oct 02 11:08:22 crc kubenswrapper[4783]: I1002 11:08:22.751547 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-68d546b9d8-d47hb" event={"ID":"c3dfee2e-30ff-42e9-b095-6315d8ab67d8","Type":"ContainerStarted","Data":"e4470a463adff15c88e7622aecd5519ad7a56858be79d8494f4d64242156f8b7"} Oct 02 11:08:22 crc kubenswrapper[4783]: I1002 11:08:22.751635 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-68d546b9d8-d47hb" Oct 02 11:08:22 crc kubenswrapper[4783]: I1002 11:08:22.752263 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-lp8kh" event={"ID":"d9b837d5-d442-41aa-b6be-2fea310d330c","Type":"ContainerStarted","Data":"c852a26aad9a4fefcf3382f374cb0a14bb6a520fd344873b89e6bb6bdc92da4a"} Oct 02 11:08:22 crc kubenswrapper[4783]: I1002 11:08:22.754169 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-64bf5d555-g9k65" event={"ID":"6d68c40b-106d-478a-ad63-90dc1bbaf434","Type":"ContainerStarted","Data":"20dc81b316bbf89f1934bcf3aa128f2d2d62f824d11135acdcc8defb2f2240c3"} Oct 02 11:08:22 crc kubenswrapper[4783]: I1002 11:08:22.771228 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-68d546b9d8-d47hb" podStartSLOduration=1.771211216 podStartE2EDuration="1.771211216s" podCreationTimestamp="2025-10-02 11:08:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:08:22.769799897 +0000 UTC m=+936.085994178" watchObservedRunningTime="2025-10-02 11:08:22.771211216 +0000 UTC m=+936.087405477" Oct 02 11:08:23 crc kubenswrapper[4783]: I1002 11:08:23.275365 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/8e0102e1-45d3-4a6f-a080-55c51c71d864-memberlist\") pod \"speaker-x5j7x\" (UID: \"8e0102e1-45d3-4a6f-a080-55c51c71d864\") " pod="metallb-system/speaker-x5j7x" Oct 02 11:08:23 crc kubenswrapper[4783]: I1002 
11:08:23.281032 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/8e0102e1-45d3-4a6f-a080-55c51c71d864-memberlist\") pod \"speaker-x5j7x\" (UID: \"8e0102e1-45d3-4a6f-a080-55c51c71d864\") " pod="metallb-system/speaker-x5j7x" Oct 02 11:08:23 crc kubenswrapper[4783]: I1002 11:08:23.342824 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-x5j7x" Oct 02 11:08:23 crc kubenswrapper[4783]: W1002 11:08:23.389879 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8e0102e1_45d3_4a6f_a080_55c51c71d864.slice/crio-2dc1c69d809325c15d5c4601eaaf652029ab571d925614fe0f6228ad283345eb WatchSource:0}: Error finding container 2dc1c69d809325c15d5c4601eaaf652029ab571d925614fe0f6228ad283345eb: Status 404 returned error can't find the container with id 2dc1c69d809325c15d5c4601eaaf652029ab571d925614fe0f6228ad283345eb Oct 02 11:08:23 crc kubenswrapper[4783]: I1002 11:08:23.761109 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-x5j7x" event={"ID":"8e0102e1-45d3-4a6f-a080-55c51c71d864","Type":"ContainerStarted","Data":"2dc1c69d809325c15d5c4601eaaf652029ab571d925614fe0f6228ad283345eb"} Oct 02 11:08:24 crc kubenswrapper[4783]: I1002 11:08:24.772825 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-x5j7x" event={"ID":"8e0102e1-45d3-4a6f-a080-55c51c71d864","Type":"ContainerStarted","Data":"6c41da4ba84f60cddc62a7ebb76b992b9b71033643549708fc87b33593e0a7af"} Oct 02 11:08:24 crc kubenswrapper[4783]: I1002 11:08:24.773149 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-x5j7x" event={"ID":"8e0102e1-45d3-4a6f-a080-55c51c71d864","Type":"ContainerStarted","Data":"e9589b413e0334624dc5423e1194356894504b7353d5fee8b16266061e1dc04c"} Oct 02 11:08:24 crc kubenswrapper[4783]: I1002 11:08:24.773190 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-x5j7x" Oct 02 11:08:24 crc kubenswrapper[4783]: I1002 11:08:24.800271 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-x5j7x" podStartSLOduration=3.800255746 podStartE2EDuration="3.800255746s" podCreationTimestamp="2025-10-02 11:08:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:08:24.796144633 +0000 UTC m=+938.112338894" watchObservedRunningTime="2025-10-02 11:08:24.800255746 +0000 UTC m=+938.116450007" Oct 02 11:08:30 crc kubenswrapper[4783]: I1002 11:08:30.826061 4783 generic.go:334] "Generic (PLEG): container finished" podID="d9b837d5-d442-41aa-b6be-2fea310d330c" containerID="4c22051c2169aa277ce3a9459453615fc4d7b83dabc32f13eb918f6965a75f53" exitCode=0 Oct 02 11:08:30 crc kubenswrapper[4783]: I1002 11:08:30.826132 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-lp8kh" event={"ID":"d9b837d5-d442-41aa-b6be-2fea310d330c","Type":"ContainerDied","Data":"4c22051c2169aa277ce3a9459453615fc4d7b83dabc32f13eb918f6965a75f53"} Oct 02 11:08:30 crc kubenswrapper[4783]: I1002 11:08:30.828108 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-64bf5d555-g9k65" event={"ID":"6d68c40b-106d-478a-ad63-90dc1bbaf434","Type":"ContainerStarted","Data":"75ce45d9b2b524e36da4f97168a66ebecff069cb8ddbb450d192ce8aaf47846c"} Oct 02 11:08:30 
crc kubenswrapper[4783]: I1002 11:08:30.828309 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-64bf5d555-g9k65" Oct 02 11:08:30 crc kubenswrapper[4783]: I1002 11:08:30.883477 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-64bf5d555-g9k65" podStartSLOduration=1.988985577 podStartE2EDuration="9.883456355s" podCreationTimestamp="2025-10-02 11:08:21 +0000 UTC" firstStartedPulling="2025-10-02 11:08:22.551878103 +0000 UTC m=+935.868072364" lastFinishedPulling="2025-10-02 11:08:30.446348881 +0000 UTC m=+943.762543142" observedRunningTime="2025-10-02 11:08:30.881593674 +0000 UTC m=+944.197787955" watchObservedRunningTime="2025-10-02 11:08:30.883456355 +0000 UTC m=+944.199650616" Oct 02 11:08:31 crc kubenswrapper[4783]: I1002 11:08:31.837780 4783 generic.go:334] "Generic (PLEG): container finished" podID="d9b837d5-d442-41aa-b6be-2fea310d330c" containerID="f1b65002801182f112d12eb827b30d8ac9cf795486713d9bd2dbd5ee3ee259d0" exitCode=0 Oct 02 11:08:31 crc kubenswrapper[4783]: I1002 11:08:31.837832 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-lp8kh" event={"ID":"d9b837d5-d442-41aa-b6be-2fea310d330c","Type":"ContainerDied","Data":"f1b65002801182f112d12eb827b30d8ac9cf795486713d9bd2dbd5ee3ee259d0"} Oct 02 11:08:32 crc kubenswrapper[4783]: I1002 11:08:32.846662 4783 generic.go:334] "Generic (PLEG): container finished" podID="d9b837d5-d442-41aa-b6be-2fea310d330c" containerID="825cb725181b0d34a939b2665d87d782d466d096e656c689c884694131bd5ca1" exitCode=0 Oct 02 11:08:32 crc kubenswrapper[4783]: I1002 11:08:32.846746 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-lp8kh" event={"ID":"d9b837d5-d442-41aa-b6be-2fea310d330c","Type":"ContainerDied","Data":"825cb725181b0d34a939b2665d87d782d466d096e656c689c884694131bd5ca1"} Oct 02 11:08:33 crc kubenswrapper[4783]: I1002 11:08:33.347214 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-x5j7x" Oct 02 11:08:33 crc kubenswrapper[4783]: I1002 11:08:33.855978 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-lp8kh" event={"ID":"d9b837d5-d442-41aa-b6be-2fea310d330c","Type":"ContainerStarted","Data":"a03d33579ccdc5ec2b0845762bb79124e1aaf6574380b1ffc24be2eb313b40c5"} Oct 02 11:08:33 crc kubenswrapper[4783]: I1002 11:08:33.856018 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-lp8kh" event={"ID":"d9b837d5-d442-41aa-b6be-2fea310d330c","Type":"ContainerStarted","Data":"9c09b9e3c0599fb7a4e326c0972fdc87c67ca8833fac7ab64237fc4e351c2b72"} Oct 02 11:08:33 crc kubenswrapper[4783]: I1002 11:08:33.856028 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-lp8kh" event={"ID":"d9b837d5-d442-41aa-b6be-2fea310d330c","Type":"ContainerStarted","Data":"e1ac8f05ca85f474b51f36f6374b9ef594bbb2dd38b77c2cd0fc6b48da94d0e8"} Oct 02 11:08:34 crc kubenswrapper[4783]: I1002 11:08:34.873453 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-lp8kh" event={"ID":"d9b837d5-d442-41aa-b6be-2fea310d330c","Type":"ContainerStarted","Data":"74fce9b2b122d970dad5673ecc78262b983c47a54e265ea872932940dd36ffdd"} Oct 02 11:08:34 crc kubenswrapper[4783]: I1002 11:08:34.873791 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-lp8kh" 
event={"ID":"d9b837d5-d442-41aa-b6be-2fea310d330c","Type":"ContainerStarted","Data":"76a3c133affa2ad238bcafb19b2e263d21fc6cca0a707cf3d2d427d3fb1b8d64"} Oct 02 11:08:34 crc kubenswrapper[4783]: I1002 11:08:34.873804 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-lp8kh" event={"ID":"d9b837d5-d442-41aa-b6be-2fea310d330c","Type":"ContainerStarted","Data":"01112e31ddb14b37697fe7b81c7a9e715c3fd966692ca6e47f83e88c0f6a5ae3"} Oct 02 11:08:34 crc kubenswrapper[4783]: I1002 11:08:34.874745 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-lp8kh" Oct 02 11:08:34 crc kubenswrapper[4783]: I1002 11:08:34.905749 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-lp8kh" podStartSLOduration=5.419396431 podStartE2EDuration="13.905727797s" podCreationTimestamp="2025-10-02 11:08:21 +0000 UTC" firstStartedPulling="2025-10-02 11:08:21.913881494 +0000 UTC m=+935.230075755" lastFinishedPulling="2025-10-02 11:08:30.40021285 +0000 UTC m=+943.716407121" observedRunningTime="2025-10-02 11:08:34.903694231 +0000 UTC m=+948.219888502" watchObservedRunningTime="2025-10-02 11:08:34.905727797 +0000 UTC m=+948.221922098" Oct 02 11:08:36 crc kubenswrapper[4783]: I1002 11:08:36.370897 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-zpvn9"] Oct 02 11:08:36 crc kubenswrapper[4783]: I1002 11:08:36.371803 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-zpvn9" Oct 02 11:08:36 crc kubenswrapper[4783]: I1002 11:08:36.376329 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-lh9xh" Oct 02 11:08:36 crc kubenswrapper[4783]: I1002 11:08:36.377375 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Oct 02 11:08:36 crc kubenswrapper[4783]: I1002 11:08:36.379052 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Oct 02 11:08:36 crc kubenswrapper[4783]: I1002 11:08:36.386435 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-zpvn9"] Oct 02 11:08:36 crc kubenswrapper[4783]: I1002 11:08:36.459792 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6hbhd\" (UniqueName: \"kubernetes.io/projected/701cdf5f-f27f-460e-ba16-beca35b1c34d-kube-api-access-6hbhd\") pod \"openstack-operator-index-zpvn9\" (UID: \"701cdf5f-f27f-460e-ba16-beca35b1c34d\") " pod="openstack-operators/openstack-operator-index-zpvn9" Oct 02 11:08:36 crc kubenswrapper[4783]: I1002 11:08:36.560979 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6hbhd\" (UniqueName: \"kubernetes.io/projected/701cdf5f-f27f-460e-ba16-beca35b1c34d-kube-api-access-6hbhd\") pod \"openstack-operator-index-zpvn9\" (UID: \"701cdf5f-f27f-460e-ba16-beca35b1c34d\") " pod="openstack-operators/openstack-operator-index-zpvn9" Oct 02 11:08:36 crc kubenswrapper[4783]: I1002 11:08:36.579350 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6hbhd\" (UniqueName: \"kubernetes.io/projected/701cdf5f-f27f-460e-ba16-beca35b1c34d-kube-api-access-6hbhd\") pod \"openstack-operator-index-zpvn9\" (UID: \"701cdf5f-f27f-460e-ba16-beca35b1c34d\") " 
pod="openstack-operators/openstack-operator-index-zpvn9" Oct 02 11:08:36 crc kubenswrapper[4783]: I1002 11:08:36.690226 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-zpvn9" Oct 02 11:08:36 crc kubenswrapper[4783]: I1002 11:08:36.732190 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-lp8kh" Oct 02 11:08:36 crc kubenswrapper[4783]: I1002 11:08:36.780877 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-lp8kh" Oct 02 11:08:37 crc kubenswrapper[4783]: I1002 11:08:37.135965 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-zpvn9"] Oct 02 11:08:37 crc kubenswrapper[4783]: W1002 11:08:37.142202 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod701cdf5f_f27f_460e_ba16_beca35b1c34d.slice/crio-8c0487e94f3ddb7b0d5547198fb873c084ef1305dc2073441d63b0c7f7b68dae WatchSource:0}: Error finding container 8c0487e94f3ddb7b0d5547198fb873c084ef1305dc2073441d63b0c7f7b68dae: Status 404 returned error can't find the container with id 8c0487e94f3ddb7b0d5547198fb873c084ef1305dc2073441d63b0c7f7b68dae Oct 02 11:08:37 crc kubenswrapper[4783]: I1002 11:08:37.892357 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-zpvn9" event={"ID":"701cdf5f-f27f-460e-ba16-beca35b1c34d","Type":"ContainerStarted","Data":"8c0487e94f3ddb7b0d5547198fb873c084ef1305dc2073441d63b0c7f7b68dae"} Oct 02 11:08:39 crc kubenswrapper[4783]: I1002 11:08:39.744162 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-zpvn9"] Oct 02 11:08:40 crc kubenswrapper[4783]: I1002 11:08:40.344389 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-98sw6"] Oct 02 11:08:40 crc kubenswrapper[4783]: I1002 11:08:40.345215 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-98sw6" Oct 02 11:08:40 crc kubenswrapper[4783]: I1002 11:08:40.360988 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-98sw6"] Oct 02 11:08:40 crc kubenswrapper[4783]: I1002 11:08:40.408243 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qqddj\" (UniqueName: \"kubernetes.io/projected/999b4d1a-2ac1-42e6-8799-d52d3f34341a-kube-api-access-qqddj\") pod \"openstack-operator-index-98sw6\" (UID: \"999b4d1a-2ac1-42e6-8799-d52d3f34341a\") " pod="openstack-operators/openstack-operator-index-98sw6" Oct 02 11:08:40 crc kubenswrapper[4783]: I1002 11:08:40.509557 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qqddj\" (UniqueName: \"kubernetes.io/projected/999b4d1a-2ac1-42e6-8799-d52d3f34341a-kube-api-access-qqddj\") pod \"openstack-operator-index-98sw6\" (UID: \"999b4d1a-2ac1-42e6-8799-d52d3f34341a\") " pod="openstack-operators/openstack-operator-index-98sw6" Oct 02 11:08:40 crc kubenswrapper[4783]: I1002 11:08:40.528686 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qqddj\" (UniqueName: \"kubernetes.io/projected/999b4d1a-2ac1-42e6-8799-d52d3f34341a-kube-api-access-qqddj\") pod \"openstack-operator-index-98sw6\" (UID: \"999b4d1a-2ac1-42e6-8799-d52d3f34341a\") " pod="openstack-operators/openstack-operator-index-98sw6" Oct 02 11:08:40 crc kubenswrapper[4783]: I1002 11:08:40.672230 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-98sw6" Oct 02 11:08:40 crc kubenswrapper[4783]: I1002 11:08:40.849816 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-98sw6"] Oct 02 11:08:40 crc kubenswrapper[4783]: W1002 11:08:40.855743 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod999b4d1a_2ac1_42e6_8799_d52d3f34341a.slice/crio-8cf7d4fd166e9e34b8f3bb9edf68455764117ed07c5f936081e66f80b1da04ed WatchSource:0}: Error finding container 8cf7d4fd166e9e34b8f3bb9edf68455764117ed07c5f936081e66f80b1da04ed: Status 404 returned error can't find the container with id 8cf7d4fd166e9e34b8f3bb9edf68455764117ed07c5f936081e66f80b1da04ed Oct 02 11:08:40 crc kubenswrapper[4783]: I1002 11:08:40.909290 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-98sw6" event={"ID":"999b4d1a-2ac1-42e6-8799-d52d3f34341a","Type":"ContainerStarted","Data":"8cf7d4fd166e9e34b8f3bb9edf68455764117ed07c5f936081e66f80b1da04ed"} Oct 02 11:08:40 crc kubenswrapper[4783]: I1002 11:08:40.911524 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-zpvn9" event={"ID":"701cdf5f-f27f-460e-ba16-beca35b1c34d","Type":"ContainerStarted","Data":"2bfa44f98aca38a7ec232296a166ab936c29276a49cbb0a75fefa6ad8304738f"} Oct 02 11:08:40 crc kubenswrapper[4783]: I1002 11:08:40.911962 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-zpvn9" podUID="701cdf5f-f27f-460e-ba16-beca35b1c34d" containerName="registry-server" containerID="cri-o://2bfa44f98aca38a7ec232296a166ab936c29276a49cbb0a75fefa6ad8304738f" gracePeriod=2 Oct 02 11:08:40 crc kubenswrapper[4783]: I1002 11:08:40.938242 4783 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-zpvn9" podStartSLOduration=1.5927699469999999 podStartE2EDuration="4.938218469s" podCreationTimestamp="2025-10-02 11:08:36 +0000 UTC" firstStartedPulling="2025-10-02 11:08:37.144283769 +0000 UTC m=+950.460478030" lastFinishedPulling="2025-10-02 11:08:40.489732291 +0000 UTC m=+953.805926552" observedRunningTime="2025-10-02 11:08:40.928140642 +0000 UTC m=+954.244334903" watchObservedRunningTime="2025-10-02 11:08:40.938218469 +0000 UTC m=+954.254412730" Oct 02 11:08:41 crc kubenswrapper[4783]: I1002 11:08:41.186990 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-zpvn9" Oct 02 11:08:41 crc kubenswrapper[4783]: I1002 11:08:41.319819 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6hbhd\" (UniqueName: \"kubernetes.io/projected/701cdf5f-f27f-460e-ba16-beca35b1c34d-kube-api-access-6hbhd\") pod \"701cdf5f-f27f-460e-ba16-beca35b1c34d\" (UID: \"701cdf5f-f27f-460e-ba16-beca35b1c34d\") " Oct 02 11:08:41 crc kubenswrapper[4783]: I1002 11:08:41.326155 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/701cdf5f-f27f-460e-ba16-beca35b1c34d-kube-api-access-6hbhd" (OuterVolumeSpecName: "kube-api-access-6hbhd") pod "701cdf5f-f27f-460e-ba16-beca35b1c34d" (UID: "701cdf5f-f27f-460e-ba16-beca35b1c34d"). InnerVolumeSpecName "kube-api-access-6hbhd". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:08:41 crc kubenswrapper[4783]: I1002 11:08:41.422077 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6hbhd\" (UniqueName: \"kubernetes.io/projected/701cdf5f-f27f-460e-ba16-beca35b1c34d-kube-api-access-6hbhd\") on node \"crc\" DevicePath \"\"" Oct 02 11:08:41 crc kubenswrapper[4783]: I1002 11:08:41.880836 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-68d546b9d8-d47hb" Oct 02 11:08:41 crc kubenswrapper[4783]: I1002 11:08:41.919116 4783 generic.go:334] "Generic (PLEG): container finished" podID="701cdf5f-f27f-460e-ba16-beca35b1c34d" containerID="2bfa44f98aca38a7ec232296a166ab936c29276a49cbb0a75fefa6ad8304738f" exitCode=0 Oct 02 11:08:41 crc kubenswrapper[4783]: I1002 11:08:41.919184 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-zpvn9" Oct 02 11:08:41 crc kubenswrapper[4783]: I1002 11:08:41.919234 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-zpvn9" event={"ID":"701cdf5f-f27f-460e-ba16-beca35b1c34d","Type":"ContainerDied","Data":"2bfa44f98aca38a7ec232296a166ab936c29276a49cbb0a75fefa6ad8304738f"} Oct 02 11:08:41 crc kubenswrapper[4783]: I1002 11:08:41.919297 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-zpvn9" event={"ID":"701cdf5f-f27f-460e-ba16-beca35b1c34d","Type":"ContainerDied","Data":"8c0487e94f3ddb7b0d5547198fb873c084ef1305dc2073441d63b0c7f7b68dae"} Oct 02 11:08:41 crc kubenswrapper[4783]: I1002 11:08:41.919343 4783 scope.go:117] "RemoveContainer" containerID="2bfa44f98aca38a7ec232296a166ab936c29276a49cbb0a75fefa6ad8304738f" Oct 02 11:08:41 crc kubenswrapper[4783]: I1002 11:08:41.921482 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-98sw6" event={"ID":"999b4d1a-2ac1-42e6-8799-d52d3f34341a","Type":"ContainerStarted","Data":"6a34e5b28c07352d8a49db51ba60667246fd720cd2e513a481be597414247766"} Oct 02 11:08:41 crc kubenswrapper[4783]: I1002 11:08:41.941794 4783 scope.go:117] "RemoveContainer" containerID="2bfa44f98aca38a7ec232296a166ab936c29276a49cbb0a75fefa6ad8304738f" Oct 02 11:08:41 crc kubenswrapper[4783]: E1002 11:08:41.942338 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2bfa44f98aca38a7ec232296a166ab936c29276a49cbb0a75fefa6ad8304738f\": container with ID starting with 2bfa44f98aca38a7ec232296a166ab936c29276a49cbb0a75fefa6ad8304738f not found: ID does not exist" containerID="2bfa44f98aca38a7ec232296a166ab936c29276a49cbb0a75fefa6ad8304738f" Oct 02 11:08:41 crc kubenswrapper[4783]: I1002 11:08:41.942389 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2bfa44f98aca38a7ec232296a166ab936c29276a49cbb0a75fefa6ad8304738f"} err="failed to get container status \"2bfa44f98aca38a7ec232296a166ab936c29276a49cbb0a75fefa6ad8304738f\": rpc error: code = NotFound desc = could not find container \"2bfa44f98aca38a7ec232296a166ab936c29276a49cbb0a75fefa6ad8304738f\": container with ID starting with 2bfa44f98aca38a7ec232296a166ab936c29276a49cbb0a75fefa6ad8304738f not found: ID does not exist" Oct 02 11:08:41 crc kubenswrapper[4783]: I1002 11:08:41.979617 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-98sw6" podStartSLOduration=1.928859997 podStartE2EDuration="1.979596915s" podCreationTimestamp="2025-10-02 11:08:40 +0000 UTC" firstStartedPulling="2025-10-02 11:08:40.86058274 +0000 UTC m=+954.176777001" lastFinishedPulling="2025-10-02 11:08:40.911319658 +0000 UTC m=+954.227513919" observedRunningTime="2025-10-02 11:08:41.948745865 +0000 UTC m=+955.264940126" watchObservedRunningTime="2025-10-02 11:08:41.979596915 +0000 UTC m=+955.295791196" Oct 02 11:08:41 crc kubenswrapper[4783]: I1002 11:08:41.980329 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-zpvn9"] Oct 02 11:08:41 crc kubenswrapper[4783]: I1002 11:08:41.984701 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-zpvn9"] Oct 02 11:08:42 crc kubenswrapper[4783]: I1002 11:08:42.355925 4783 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-64bf5d555-g9k65" Oct 02 11:08:43 crc kubenswrapper[4783]: I1002 11:08:43.552758 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="701cdf5f-f27f-460e-ba16-beca35b1c34d" path="/var/lib/kubelet/pods/701cdf5f-f27f-460e-ba16-beca35b1c34d/volumes" Oct 02 11:08:50 crc kubenswrapper[4783]: I1002 11:08:50.672839 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-98sw6" Oct 02 11:08:50 crc kubenswrapper[4783]: I1002 11:08:50.673475 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-98sw6" Oct 02 11:08:50 crc kubenswrapper[4783]: I1002 11:08:50.705284 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-98sw6" Oct 02 11:08:51 crc kubenswrapper[4783]: I1002 11:08:51.021954 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-98sw6" Oct 02 11:08:51 crc kubenswrapper[4783]: I1002 11:08:51.513127 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 02 11:08:51 crc kubenswrapper[4783]: I1002 11:08:51.513182 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 02 11:08:51 crc kubenswrapper[4783]: I1002 11:08:51.513218 4783 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" Oct 02 11:08:51 crc kubenswrapper[4783]: I1002 11:08:51.513818 4783 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"71430381fae101618ae61e001dc6e16fd7ea79336b50af0d653500a93eaed8e8"} pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 02 11:08:51 crc kubenswrapper[4783]: I1002 11:08:51.513873 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" containerID="cri-o://71430381fae101618ae61e001dc6e16fd7ea79336b50af0d653500a93eaed8e8" gracePeriod=600 Oct 02 11:08:51 crc kubenswrapper[4783]: I1002 11:08:51.734941 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-lp8kh" Oct 02 11:08:52 crc kubenswrapper[4783]: I1002 11:08:52.008022 4783 generic.go:334] "Generic (PLEG): container finished" podID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerID="71430381fae101618ae61e001dc6e16fd7ea79336b50af0d653500a93eaed8e8" exitCode=0 Oct 02 11:08:52 crc kubenswrapper[4783]: I1002 11:08:52.008091 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" 
event={"ID":"3288cc82-59a8-408e-8b0e-b5255882b4fb","Type":"ContainerDied","Data":"71430381fae101618ae61e001dc6e16fd7ea79336b50af0d653500a93eaed8e8"} Oct 02 11:08:52 crc kubenswrapper[4783]: I1002 11:08:52.008427 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" event={"ID":"3288cc82-59a8-408e-8b0e-b5255882b4fb","Type":"ContainerStarted","Data":"9eb9fd07e1e6e14820a34c4d9ea92acb1e0177338f9204e820b47da5ec49b7d3"} Oct 02 11:08:52 crc kubenswrapper[4783]: I1002 11:08:52.008451 4783 scope.go:117] "RemoveContainer" containerID="3d77624023f180014e3aae2d3836e37ae2fefe61ff0ccee775ec35e399cb240c" Oct 02 11:08:52 crc kubenswrapper[4783]: I1002 11:08:52.029844 4783 scope.go:117] "RemoveContainer" containerID="c7b837b3146fd661f532d6de35193ba6facf1c1681e01ea3c274bb1093a5666b" Oct 02 11:08:52 crc kubenswrapper[4783]: I1002 11:08:52.191732 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/157dbb73256b1b6d15c92cb3b9832917051f27d0aa325f8cd46370e26dn66t9"] Oct 02 11:08:52 crc kubenswrapper[4783]: E1002 11:08:52.192022 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="701cdf5f-f27f-460e-ba16-beca35b1c34d" containerName="registry-server" Oct 02 11:08:52 crc kubenswrapper[4783]: I1002 11:08:52.192042 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="701cdf5f-f27f-460e-ba16-beca35b1c34d" containerName="registry-server" Oct 02 11:08:52 crc kubenswrapper[4783]: I1002 11:08:52.192189 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="701cdf5f-f27f-460e-ba16-beca35b1c34d" containerName="registry-server" Oct 02 11:08:52 crc kubenswrapper[4783]: I1002 11:08:52.193150 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/157dbb73256b1b6d15c92cb3b9832917051f27d0aa325f8cd46370e26dn66t9" Oct 02 11:08:52 crc kubenswrapper[4783]: I1002 11:08:52.196527 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-qjdq4" Oct 02 11:08:52 crc kubenswrapper[4783]: I1002 11:08:52.205758 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/157dbb73256b1b6d15c92cb3b9832917051f27d0aa325f8cd46370e26dn66t9"] Oct 02 11:08:52 crc kubenswrapper[4783]: I1002 11:08:52.272100 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8xncp\" (UniqueName: \"kubernetes.io/projected/04a57d36-edbe-46bc-b8b8-d884500f159a-kube-api-access-8xncp\") pod \"157dbb73256b1b6d15c92cb3b9832917051f27d0aa325f8cd46370e26dn66t9\" (UID: \"04a57d36-edbe-46bc-b8b8-d884500f159a\") " pod="openstack-operators/157dbb73256b1b6d15c92cb3b9832917051f27d0aa325f8cd46370e26dn66t9" Oct 02 11:08:52 crc kubenswrapper[4783]: I1002 11:08:52.272233 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/04a57d36-edbe-46bc-b8b8-d884500f159a-bundle\") pod \"157dbb73256b1b6d15c92cb3b9832917051f27d0aa325f8cd46370e26dn66t9\" (UID: \"04a57d36-edbe-46bc-b8b8-d884500f159a\") " pod="openstack-operators/157dbb73256b1b6d15c92cb3b9832917051f27d0aa325f8cd46370e26dn66t9" Oct 02 11:08:52 crc kubenswrapper[4783]: I1002 11:08:52.272269 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/04a57d36-edbe-46bc-b8b8-d884500f159a-util\") pod 
\"157dbb73256b1b6d15c92cb3b9832917051f27d0aa325f8cd46370e26dn66t9\" (UID: \"04a57d36-edbe-46bc-b8b8-d884500f159a\") " pod="openstack-operators/157dbb73256b1b6d15c92cb3b9832917051f27d0aa325f8cd46370e26dn66t9" Oct 02 11:08:52 crc kubenswrapper[4783]: I1002 11:08:52.373042 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/04a57d36-edbe-46bc-b8b8-d884500f159a-bundle\") pod \"157dbb73256b1b6d15c92cb3b9832917051f27d0aa325f8cd46370e26dn66t9\" (UID: \"04a57d36-edbe-46bc-b8b8-d884500f159a\") " pod="openstack-operators/157dbb73256b1b6d15c92cb3b9832917051f27d0aa325f8cd46370e26dn66t9" Oct 02 11:08:52 crc kubenswrapper[4783]: I1002 11:08:52.373465 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/04a57d36-edbe-46bc-b8b8-d884500f159a-util\") pod \"157dbb73256b1b6d15c92cb3b9832917051f27d0aa325f8cd46370e26dn66t9\" (UID: \"04a57d36-edbe-46bc-b8b8-d884500f159a\") " pod="openstack-operators/157dbb73256b1b6d15c92cb3b9832917051f27d0aa325f8cd46370e26dn66t9" Oct 02 11:08:52 crc kubenswrapper[4783]: I1002 11:08:52.373509 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8xncp\" (UniqueName: \"kubernetes.io/projected/04a57d36-edbe-46bc-b8b8-d884500f159a-kube-api-access-8xncp\") pod \"157dbb73256b1b6d15c92cb3b9832917051f27d0aa325f8cd46370e26dn66t9\" (UID: \"04a57d36-edbe-46bc-b8b8-d884500f159a\") " pod="openstack-operators/157dbb73256b1b6d15c92cb3b9832917051f27d0aa325f8cd46370e26dn66t9" Oct 02 11:08:52 crc kubenswrapper[4783]: I1002 11:08:52.373631 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/04a57d36-edbe-46bc-b8b8-d884500f159a-bundle\") pod \"157dbb73256b1b6d15c92cb3b9832917051f27d0aa325f8cd46370e26dn66t9\" (UID: \"04a57d36-edbe-46bc-b8b8-d884500f159a\") " pod="openstack-operators/157dbb73256b1b6d15c92cb3b9832917051f27d0aa325f8cd46370e26dn66t9" Oct 02 11:08:52 crc kubenswrapper[4783]: I1002 11:08:52.373977 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/04a57d36-edbe-46bc-b8b8-d884500f159a-util\") pod \"157dbb73256b1b6d15c92cb3b9832917051f27d0aa325f8cd46370e26dn66t9\" (UID: \"04a57d36-edbe-46bc-b8b8-d884500f159a\") " pod="openstack-operators/157dbb73256b1b6d15c92cb3b9832917051f27d0aa325f8cd46370e26dn66t9" Oct 02 11:08:52 crc kubenswrapper[4783]: I1002 11:08:52.393916 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8xncp\" (UniqueName: \"kubernetes.io/projected/04a57d36-edbe-46bc-b8b8-d884500f159a-kube-api-access-8xncp\") pod \"157dbb73256b1b6d15c92cb3b9832917051f27d0aa325f8cd46370e26dn66t9\" (UID: \"04a57d36-edbe-46bc-b8b8-d884500f159a\") " pod="openstack-operators/157dbb73256b1b6d15c92cb3b9832917051f27d0aa325f8cd46370e26dn66t9" Oct 02 11:08:52 crc kubenswrapper[4783]: I1002 11:08:52.509611 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/157dbb73256b1b6d15c92cb3b9832917051f27d0aa325f8cd46370e26dn66t9" Oct 02 11:08:52 crc kubenswrapper[4783]: I1002 11:08:52.705962 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/157dbb73256b1b6d15c92cb3b9832917051f27d0aa325f8cd46370e26dn66t9"] Oct 02 11:08:52 crc kubenswrapper[4783]: W1002 11:08:52.711362 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod04a57d36_edbe_46bc_b8b8_d884500f159a.slice/crio-086113699aa8ec0e16a7d750f42d1d18488bf5e4925658abfd70e1ad80f2d5f5 WatchSource:0}: Error finding container 086113699aa8ec0e16a7d750f42d1d18488bf5e4925658abfd70e1ad80f2d5f5: Status 404 returned error can't find the container with id 086113699aa8ec0e16a7d750f42d1d18488bf5e4925658abfd70e1ad80f2d5f5 Oct 02 11:08:53 crc kubenswrapper[4783]: I1002 11:08:53.015050 4783 generic.go:334] "Generic (PLEG): container finished" podID="04a57d36-edbe-46bc-b8b8-d884500f159a" containerID="dd92632c15f42b33859db4a31178fdc465500bf21eb27a6f0ec8a47cb8f77d1c" exitCode=0 Oct 02 11:08:53 crc kubenswrapper[4783]: I1002 11:08:53.015123 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/157dbb73256b1b6d15c92cb3b9832917051f27d0aa325f8cd46370e26dn66t9" event={"ID":"04a57d36-edbe-46bc-b8b8-d884500f159a","Type":"ContainerDied","Data":"dd92632c15f42b33859db4a31178fdc465500bf21eb27a6f0ec8a47cb8f77d1c"} Oct 02 11:08:53 crc kubenswrapper[4783]: I1002 11:08:53.015965 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/157dbb73256b1b6d15c92cb3b9832917051f27d0aa325f8cd46370e26dn66t9" event={"ID":"04a57d36-edbe-46bc-b8b8-d884500f159a","Type":"ContainerStarted","Data":"086113699aa8ec0e16a7d750f42d1d18488bf5e4925658abfd70e1ad80f2d5f5"} Oct 02 11:08:54 crc kubenswrapper[4783]: I1002 11:08:54.029454 4783 generic.go:334] "Generic (PLEG): container finished" podID="04a57d36-edbe-46bc-b8b8-d884500f159a" containerID="c7ff0c18163389b3ba475551068d4c84e4b5121e92ff8ad118086a81c1a7e15a" exitCode=0 Oct 02 11:08:54 crc kubenswrapper[4783]: I1002 11:08:54.029513 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/157dbb73256b1b6d15c92cb3b9832917051f27d0aa325f8cd46370e26dn66t9" event={"ID":"04a57d36-edbe-46bc-b8b8-d884500f159a","Type":"ContainerDied","Data":"c7ff0c18163389b3ba475551068d4c84e4b5121e92ff8ad118086a81c1a7e15a"} Oct 02 11:08:55 crc kubenswrapper[4783]: I1002 11:08:55.039300 4783 generic.go:334] "Generic (PLEG): container finished" podID="04a57d36-edbe-46bc-b8b8-d884500f159a" containerID="91a9e3832a199bf8281c282e20f4c750b15d7c9163f0e9eb21f431d717290e39" exitCode=0 Oct 02 11:08:55 crc kubenswrapper[4783]: I1002 11:08:55.039483 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/157dbb73256b1b6d15c92cb3b9832917051f27d0aa325f8cd46370e26dn66t9" event={"ID":"04a57d36-edbe-46bc-b8b8-d884500f159a","Type":"ContainerDied","Data":"91a9e3832a199bf8281c282e20f4c750b15d7c9163f0e9eb21f431d717290e39"} Oct 02 11:08:56 crc kubenswrapper[4783]: I1002 11:08:56.335149 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/157dbb73256b1b6d15c92cb3b9832917051f27d0aa325f8cd46370e26dn66t9" Oct 02 11:08:56 crc kubenswrapper[4783]: I1002 11:08:56.430119 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/04a57d36-edbe-46bc-b8b8-d884500f159a-util\") pod \"04a57d36-edbe-46bc-b8b8-d884500f159a\" (UID: \"04a57d36-edbe-46bc-b8b8-d884500f159a\") " Oct 02 11:08:56 crc kubenswrapper[4783]: I1002 11:08:56.430233 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8xncp\" (UniqueName: \"kubernetes.io/projected/04a57d36-edbe-46bc-b8b8-d884500f159a-kube-api-access-8xncp\") pod \"04a57d36-edbe-46bc-b8b8-d884500f159a\" (UID: \"04a57d36-edbe-46bc-b8b8-d884500f159a\") " Oct 02 11:08:56 crc kubenswrapper[4783]: I1002 11:08:56.430270 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/04a57d36-edbe-46bc-b8b8-d884500f159a-bundle\") pod \"04a57d36-edbe-46bc-b8b8-d884500f159a\" (UID: \"04a57d36-edbe-46bc-b8b8-d884500f159a\") " Oct 02 11:08:56 crc kubenswrapper[4783]: I1002 11:08:56.431206 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/04a57d36-edbe-46bc-b8b8-d884500f159a-bundle" (OuterVolumeSpecName: "bundle") pod "04a57d36-edbe-46bc-b8b8-d884500f159a" (UID: "04a57d36-edbe-46bc-b8b8-d884500f159a"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:08:56 crc kubenswrapper[4783]: I1002 11:08:56.435356 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/04a57d36-edbe-46bc-b8b8-d884500f159a-kube-api-access-8xncp" (OuterVolumeSpecName: "kube-api-access-8xncp") pod "04a57d36-edbe-46bc-b8b8-d884500f159a" (UID: "04a57d36-edbe-46bc-b8b8-d884500f159a"). InnerVolumeSpecName "kube-api-access-8xncp". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:08:56 crc kubenswrapper[4783]: I1002 11:08:56.444353 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/04a57d36-edbe-46bc-b8b8-d884500f159a-util" (OuterVolumeSpecName: "util") pod "04a57d36-edbe-46bc-b8b8-d884500f159a" (UID: "04a57d36-edbe-46bc-b8b8-d884500f159a"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:08:56 crc kubenswrapper[4783]: I1002 11:08:56.531625 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8xncp\" (UniqueName: \"kubernetes.io/projected/04a57d36-edbe-46bc-b8b8-d884500f159a-kube-api-access-8xncp\") on node \"crc\" DevicePath \"\"" Oct 02 11:08:56 crc kubenswrapper[4783]: I1002 11:08:56.531670 4783 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/04a57d36-edbe-46bc-b8b8-d884500f159a-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 11:08:56 crc kubenswrapper[4783]: I1002 11:08:56.531682 4783 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/04a57d36-edbe-46bc-b8b8-d884500f159a-util\") on node \"crc\" DevicePath \"\"" Oct 02 11:08:57 crc kubenswrapper[4783]: I1002 11:08:57.057567 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/157dbb73256b1b6d15c92cb3b9832917051f27d0aa325f8cd46370e26dn66t9" event={"ID":"04a57d36-edbe-46bc-b8b8-d884500f159a","Type":"ContainerDied","Data":"086113699aa8ec0e16a7d750f42d1d18488bf5e4925658abfd70e1ad80f2d5f5"} Oct 02 11:08:57 crc kubenswrapper[4783]: I1002 11:08:57.057612 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/157dbb73256b1b6d15c92cb3b9832917051f27d0aa325f8cd46370e26dn66t9" Oct 02 11:08:57 crc kubenswrapper[4783]: I1002 11:08:57.057624 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="086113699aa8ec0e16a7d750f42d1d18488bf5e4925658abfd70e1ad80f2d5f5" Oct 02 11:09:04 crc kubenswrapper[4783]: I1002 11:09:04.830385 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-operator-cc764bd77-wk2px"] Oct 02 11:09:04 crc kubenswrapper[4783]: E1002 11:09:04.831035 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04a57d36-edbe-46bc-b8b8-d884500f159a" containerName="util" Oct 02 11:09:04 crc kubenswrapper[4783]: I1002 11:09:04.831052 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="04a57d36-edbe-46bc-b8b8-d884500f159a" containerName="util" Oct 02 11:09:04 crc kubenswrapper[4783]: E1002 11:09:04.831082 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04a57d36-edbe-46bc-b8b8-d884500f159a" containerName="pull" Oct 02 11:09:04 crc kubenswrapper[4783]: I1002 11:09:04.831089 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="04a57d36-edbe-46bc-b8b8-d884500f159a" containerName="pull" Oct 02 11:09:04 crc kubenswrapper[4783]: E1002 11:09:04.831101 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="04a57d36-edbe-46bc-b8b8-d884500f159a" containerName="extract" Oct 02 11:09:04 crc kubenswrapper[4783]: I1002 11:09:04.831108 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="04a57d36-edbe-46bc-b8b8-d884500f159a" containerName="extract" Oct 02 11:09:04 crc kubenswrapper[4783]: I1002 11:09:04.831255 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="04a57d36-edbe-46bc-b8b8-d884500f159a" containerName="extract" Oct 02 11:09:04 crc kubenswrapper[4783]: I1002 11:09:04.832041 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-cc764bd77-wk2px" Oct 02 11:09:04 crc kubenswrapper[4783]: I1002 11:09:04.836776 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-operator-dockercfg-2px6b" Oct 02 11:09:04 crc kubenswrapper[4783]: I1002 11:09:04.864815 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-cc764bd77-wk2px"] Oct 02 11:09:04 crc kubenswrapper[4783]: I1002 11:09:04.934980 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jqjzt\" (UniqueName: \"kubernetes.io/projected/0d2a789d-053a-480b-a29e-3c0afef53319-kube-api-access-jqjzt\") pod \"openstack-operator-controller-operator-cc764bd77-wk2px\" (UID: \"0d2a789d-053a-480b-a29e-3c0afef53319\") " pod="openstack-operators/openstack-operator-controller-operator-cc764bd77-wk2px" Oct 02 11:09:05 crc kubenswrapper[4783]: I1002 11:09:05.035838 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jqjzt\" (UniqueName: \"kubernetes.io/projected/0d2a789d-053a-480b-a29e-3c0afef53319-kube-api-access-jqjzt\") pod \"openstack-operator-controller-operator-cc764bd77-wk2px\" (UID: \"0d2a789d-053a-480b-a29e-3c0afef53319\") " pod="openstack-operators/openstack-operator-controller-operator-cc764bd77-wk2px" Oct 02 11:09:05 crc kubenswrapper[4783]: I1002 11:09:05.077465 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jqjzt\" (UniqueName: \"kubernetes.io/projected/0d2a789d-053a-480b-a29e-3c0afef53319-kube-api-access-jqjzt\") pod \"openstack-operator-controller-operator-cc764bd77-wk2px\" (UID: \"0d2a789d-053a-480b-a29e-3c0afef53319\") " pod="openstack-operators/openstack-operator-controller-operator-cc764bd77-wk2px" Oct 02 11:09:05 crc kubenswrapper[4783]: I1002 11:09:05.152179 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-operator-cc764bd77-wk2px" Oct 02 11:09:05 crc kubenswrapper[4783]: I1002 11:09:05.363296 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-operator-cc764bd77-wk2px"] Oct 02 11:09:06 crc kubenswrapper[4783]: I1002 11:09:06.116846 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-cc764bd77-wk2px" event={"ID":"0d2a789d-053a-480b-a29e-3c0afef53319","Type":"ContainerStarted","Data":"de4d18a08d1916a75ceb5392fdbd8e7e458df7370ee7a6f7151f6358b56e1f0a"} Oct 02 11:09:10 crc kubenswrapper[4783]: I1002 11:09:10.142676 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-cc764bd77-wk2px" event={"ID":"0d2a789d-053a-480b-a29e-3c0afef53319","Type":"ContainerStarted","Data":"0a91a8e8d756b977ef377c582ec9257b43a01b39f0d596fd0f4a72d8576094ff"} Oct 02 11:09:12 crc kubenswrapper[4783]: I1002 11:09:12.157281 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-operator-cc764bd77-wk2px" event={"ID":"0d2a789d-053a-480b-a29e-3c0afef53319","Type":"ContainerStarted","Data":"98243ade1db517ebe85a3ba0532ad014a6acf16110707a34faaeccad53c916db"} Oct 02 11:09:12 crc kubenswrapper[4783]: I1002 11:09:12.157626 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-operator-cc764bd77-wk2px" Oct 02 11:09:15 crc kubenswrapper[4783]: I1002 11:09:15.155207 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-operator-cc764bd77-wk2px" Oct 02 11:09:15 crc kubenswrapper[4783]: I1002 11:09:15.192739 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-operator-cc764bd77-wk2px" podStartSLOduration=5.076961095 podStartE2EDuration="11.192722263s" podCreationTimestamp="2025-10-02 11:09:04 +0000 UTC" firstStartedPulling="2025-10-02 11:09:05.372567678 +0000 UTC m=+978.688761929" lastFinishedPulling="2025-10-02 11:09:11.488328836 +0000 UTC m=+984.804523097" observedRunningTime="2025-10-02 11:09:12.193787969 +0000 UTC m=+985.509982230" watchObservedRunningTime="2025-10-02 11:09:15.192722263 +0000 UTC m=+988.508916524" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.211572 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-644bddb6d8-fkhwr"] Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.212847 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-644bddb6d8-fkhwr" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.217512 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-tnj86" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.226091 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-6ff8b75857-x7698"] Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.228014 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-6ff8b75857-x7698" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.236896 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-644bddb6d8-fkhwr"] Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.242125 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-9cnb5" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.244006 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-84f4f7b77b-wt52p"] Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.244939 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-84f4f7b77b-wt52p" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.248755 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-dz9vj" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.254704 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-6ff8b75857-x7698"] Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.261476 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-84f4f7b77b-wt52p"] Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.281350 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-84958c4d49-92cr8"] Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.282477 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-84958c4d49-92cr8" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.284935 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-lszp5" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.285557 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-5d889d78cf-pqhp6"] Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.286568 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5d889d78cf-pqhp6" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.289101 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-kbxqm" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.293301 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q8hnx\" (UniqueName: \"kubernetes.io/projected/ac5ac6e0-2521-4daa-8c0e-091f13b7a406-kube-api-access-q8hnx\") pod \"cinder-operator-controller-manager-644bddb6d8-fkhwr\" (UID: \"ac5ac6e0-2521-4daa-8c0e-091f13b7a406\") " pod="openstack-operators/cinder-operator-controller-manager-644bddb6d8-fkhwr" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.293546 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w4mth\" (UniqueName: \"kubernetes.io/projected/87d34dfd-25c7-4f4c-bbda-058e38a01994-kube-api-access-w4mth\") pod \"barbican-operator-controller-manager-6ff8b75857-x7698\" (UID: \"87d34dfd-25c7-4f4c-bbda-058e38a01994\") " pod="openstack-operators/barbican-operator-controller-manager-6ff8b75857-x7698" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.293586 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cmtcz\" (UniqueName: \"kubernetes.io/projected/52d93c12-b942-4ad3-935a-b555026711ea-kube-api-access-cmtcz\") pod \"designate-operator-controller-manager-84f4f7b77b-wt52p\" (UID: \"52d93c12-b942-4ad3-935a-b555026711ea\") " pod="openstack-operators/designate-operator-controller-manager-84f4f7b77b-wt52p" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.335120 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-84958c4d49-92cr8"] Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.347076 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5d889d78cf-pqhp6"] Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.404741 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-9f4696d94-9pqks"] Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.405756 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w4mth\" (UniqueName: \"kubernetes.io/projected/87d34dfd-25c7-4f4c-bbda-058e38a01994-kube-api-access-w4mth\") pod \"barbican-operator-controller-manager-6ff8b75857-x7698\" (UID: \"87d34dfd-25c7-4f4c-bbda-058e38a01994\") " pod="openstack-operators/barbican-operator-controller-manager-6ff8b75857-x7698" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.405957 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cmtcz\" (UniqueName: \"kubernetes.io/projected/52d93c12-b942-4ad3-935a-b555026711ea-kube-api-access-cmtcz\") pod \"designate-operator-controller-manager-84f4f7b77b-wt52p\" (UID: \"52d93c12-b942-4ad3-935a-b555026711ea\") " pod="openstack-operators/designate-operator-controller-manager-84f4f7b77b-wt52p" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.406118 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q8hnx\" (UniqueName: \"kubernetes.io/projected/ac5ac6e0-2521-4daa-8c0e-091f13b7a406-kube-api-access-q8hnx\") pod 
\"cinder-operator-controller-manager-644bddb6d8-fkhwr\" (UID: \"ac5ac6e0-2521-4daa-8c0e-091f13b7a406\") " pod="openstack-operators/cinder-operator-controller-manager-644bddb6d8-fkhwr" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.415306 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6vfg6\" (UniqueName: \"kubernetes.io/projected/24d5bc39-4bbb-47af-94e2-222118ccdabb-kube-api-access-6vfg6\") pod \"heat-operator-controller-manager-5d889d78cf-pqhp6\" (UID: \"24d5bc39-4bbb-47af-94e2-222118ccdabb\") " pod="openstack-operators/heat-operator-controller-manager-5d889d78cf-pqhp6" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.415643 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2g7sd\" (UniqueName: \"kubernetes.io/projected/8041f454-2294-478d-b4cd-ffa769b8f709-kube-api-access-2g7sd\") pod \"glance-operator-controller-manager-84958c4d49-92cr8\" (UID: \"8041f454-2294-478d-b4cd-ffa769b8f709\") " pod="openstack-operators/glance-operator-controller-manager-84958c4d49-92cr8" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.418003 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-9f4696d94-9pqks" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.445096 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-chldd" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.457585 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-9d6c5db85-6bk6b"] Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.458583 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-9d6c5db85-6bk6b" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.462537 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w4mth\" (UniqueName: \"kubernetes.io/projected/87d34dfd-25c7-4f4c-bbda-058e38a01994-kube-api-access-w4mth\") pod \"barbican-operator-controller-manager-6ff8b75857-x7698\" (UID: \"87d34dfd-25c7-4f4c-bbda-058e38a01994\") " pod="openstack-operators/barbican-operator-controller-manager-6ff8b75857-x7698" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.465880 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q8hnx\" (UniqueName: \"kubernetes.io/projected/ac5ac6e0-2521-4daa-8c0e-091f13b7a406-kube-api-access-q8hnx\") pod \"cinder-operator-controller-manager-644bddb6d8-fkhwr\" (UID: \"ac5ac6e0-2521-4daa-8c0e-091f13b7a406\") " pod="openstack-operators/cinder-operator-controller-manager-644bddb6d8-fkhwr" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.470051 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-5cd4858477-hs7gk"] Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.472922 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.473087 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cmtcz\" (UniqueName: \"kubernetes.io/projected/52d93c12-b942-4ad3-935a-b555026711ea-kube-api-access-cmtcz\") pod \"designate-operator-controller-manager-84f4f7b77b-wt52p\" (UID: \"52d93c12-b942-4ad3-935a-b555026711ea\") " pod="openstack-operators/designate-operator-controller-manager-84f4f7b77b-wt52p" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.473112 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-kvx96" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.478735 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-5cd4858477-hs7gk" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.484745 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-j2449" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.488622 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-5cd4858477-hs7gk"] Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.492656 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-9f4696d94-9pqks"] Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.508935 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-5bd55b4bff-qtqcj"] Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.509905 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-5bd55b4bff-qtqcj" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.513814 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-9d6c5db85-6bk6b"] Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.516196 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-xvhwd" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.518187 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6vfg6\" (UniqueName: \"kubernetes.io/projected/24d5bc39-4bbb-47af-94e2-222118ccdabb-kube-api-access-6vfg6\") pod \"heat-operator-controller-manager-5d889d78cf-pqhp6\" (UID: \"24d5bc39-4bbb-47af-94e2-222118ccdabb\") " pod="openstack-operators/heat-operator-controller-manager-5d889d78cf-pqhp6" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.518299 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g75m9\" (UniqueName: \"kubernetes.io/projected/018c6179-6e65-461a-b457-a5eb949672de-kube-api-access-g75m9\") pod \"infra-operator-controller-manager-9d6c5db85-6bk6b\" (UID: \"018c6179-6e65-461a-b457-a5eb949672de\") " pod="openstack-operators/infra-operator-controller-manager-9d6c5db85-6bk6b" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.518328 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/018c6179-6e65-461a-b457-a5eb949672de-cert\") pod \"infra-operator-controller-manager-9d6c5db85-6bk6b\" (UID: \"018c6179-6e65-461a-b457-a5eb949672de\") " pod="openstack-operators/infra-operator-controller-manager-9d6c5db85-6bk6b" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.521666 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2g7sd\" (UniqueName: \"kubernetes.io/projected/8041f454-2294-478d-b4cd-ffa769b8f709-kube-api-access-2g7sd\") pod \"glance-operator-controller-manager-84958c4d49-92cr8\" (UID: \"8041f454-2294-478d-b4cd-ffa769b8f709\") " pod="openstack-operators/glance-operator-controller-manager-84958c4d49-92cr8" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.521786 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6zv88\" (UniqueName: \"kubernetes.io/projected/ae698aff-8888-48b6-9c37-cbcea6e7bc6e-kube-api-access-6zv88\") pod \"horizon-operator-controller-manager-9f4696d94-9pqks\" (UID: \"ae698aff-8888-48b6-9c37-cbcea6e7bc6e\") " pod="openstack-operators/horizon-operator-controller-manager-9f4696d94-9pqks" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.524003 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-5bd55b4bff-qtqcj"] Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.526223 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-88c7-w5766"] Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.527883 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-88c7-w5766" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.528554 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-644bddb6d8-fkhwr" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.531766 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-6d68dbc695-qg9vc"] Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.536344 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-6d68dbc695-qg9vc" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.542521 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-c9srg" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.544594 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-88c7-w5766"] Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.549798 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-ncz9c" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.553183 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-6ff8b75857-x7698" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.574123 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-84f4f7b77b-wt52p" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.575265 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-64cd67b5cb-kfxk9"] Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.576255 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-64cd67b5cb-kfxk9" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.580933 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-tzmxk" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.623274 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-849d5b9b84-6xwmm"] Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.624601 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-849d5b9b84-6xwmm" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.625105 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2g7sd\" (UniqueName: \"kubernetes.io/projected/8041f454-2294-478d-b4cd-ffa769b8f709-kube-api-access-2g7sd\") pod \"glance-operator-controller-manager-84958c4d49-92cr8\" (UID: \"8041f454-2294-478d-b4cd-ffa769b8f709\") " pod="openstack-operators/glance-operator-controller-manager-84958c4d49-92cr8" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.627626 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-ztt2z" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.628015 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6zv88\" (UniqueName: \"kubernetes.io/projected/ae698aff-8888-48b6-9c37-cbcea6e7bc6e-kube-api-access-6zv88\") pod \"horizon-operator-controller-manager-9f4696d94-9pqks\" (UID: \"ae698aff-8888-48b6-9c37-cbcea6e7bc6e\") " pod="openstack-operators/horizon-operator-controller-manager-9f4696d94-9pqks" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.642207 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7pp2r\" (UniqueName: \"kubernetes.io/projected/6cf94630-4019-4863-80be-6e1088cf3407-kube-api-access-7pp2r\") pod \"keystone-operator-controller-manager-5bd55b4bff-qtqcj\" (UID: \"6cf94630-4019-4863-80be-6e1088cf3407\") " pod="openstack-operators/keystone-operator-controller-manager-5bd55b4bff-qtqcj" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.642455 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z55c9\" (UniqueName: \"kubernetes.io/projected/c788b295-31bf-496b-8b4f-fccc3ff3be17-kube-api-access-z55c9\") pod \"ironic-operator-controller-manager-5cd4858477-hs7gk\" (UID: \"c788b295-31bf-496b-8b4f-fccc3ff3be17\") " pod="openstack-operators/ironic-operator-controller-manager-5cd4858477-hs7gk" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.642613 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g75m9\" (UniqueName: \"kubernetes.io/projected/018c6179-6e65-461a-b457-a5eb949672de-kube-api-access-g75m9\") pod \"infra-operator-controller-manager-9d6c5db85-6bk6b\" (UID: \"018c6179-6e65-461a-b457-a5eb949672de\") " pod="openstack-operators/infra-operator-controller-manager-9d6c5db85-6bk6b" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.642733 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/018c6179-6e65-461a-b457-a5eb949672de-cert\") pod \"infra-operator-controller-manager-9d6c5db85-6bk6b\" (UID: \"018c6179-6e65-461a-b457-a5eb949672de\") " pod="openstack-operators/infra-operator-controller-manager-9d6c5db85-6bk6b" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.642990 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p5mnj\" (UniqueName: \"kubernetes.io/projected/1f8c1065-586c-4dce-a4ce-fc262d00063d-kube-api-access-p5mnj\") pod \"manila-operator-controller-manager-6d68dbc695-qg9vc\" (UID: \"1f8c1065-586c-4dce-a4ce-fc262d00063d\") " pod="openstack-operators/manila-operator-controller-manager-6d68dbc695-qg9vc" Oct 02 11:09:32 crc 
kubenswrapper[4783]: I1002 11:09:32.643238 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qml6x\" (UniqueName: \"kubernetes.io/projected/56a432ad-7000-45e3-ac88-9ebd3a1eb3a7-kube-api-access-qml6x\") pod \"mariadb-operator-controller-manager-88c7-w5766\" (UID: \"56a432ad-7000-45e3-ac88-9ebd3a1eb3a7\") " pod="openstack-operators/mariadb-operator-controller-manager-88c7-w5766" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.643405 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ss6j5\" (UniqueName: \"kubernetes.io/projected/82a47dd2-82ef-4fb9-9216-d14a2332683f-kube-api-access-ss6j5\") pod \"nova-operator-controller-manager-64cd67b5cb-kfxk9\" (UID: \"82a47dd2-82ef-4fb9-9216-d14a2332683f\") " pod="openstack-operators/nova-operator-controller-manager-64cd67b5cb-kfxk9" Oct 02 11:09:32 crc kubenswrapper[4783]: E1002 11:09:32.642928 4783 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Oct 02 11:09:32 crc kubenswrapper[4783]: E1002 11:09:32.652328 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/018c6179-6e65-461a-b457-a5eb949672de-cert podName:018c6179-6e65-461a-b457-a5eb949672de nodeName:}" failed. No retries permitted until 2025-10-02 11:09:33.152312061 +0000 UTC m=+1006.468506322 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/018c6179-6e65-461a-b457-a5eb949672de-cert") pod "infra-operator-controller-manager-9d6c5db85-6bk6b" (UID: "018c6179-6e65-461a-b457-a5eb949672de") : secret "infra-operator-webhook-server-cert" not found Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.652272 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6vfg6\" (UniqueName: \"kubernetes.io/projected/24d5bc39-4bbb-47af-94e2-222118ccdabb-kube-api-access-6vfg6\") pod \"heat-operator-controller-manager-5d889d78cf-pqhp6\" (UID: \"24d5bc39-4bbb-47af-94e2-222118ccdabb\") " pod="openstack-operators/heat-operator-controller-manager-5d889d78cf-pqhp6" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.661984 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-849d5b9b84-6xwmm"] Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.696271 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6zv88\" (UniqueName: \"kubernetes.io/projected/ae698aff-8888-48b6-9c37-cbcea6e7bc6e-kube-api-access-6zv88\") pod \"horizon-operator-controller-manager-9f4696d94-9pqks\" (UID: \"ae698aff-8888-48b6-9c37-cbcea6e7bc6e\") " pod="openstack-operators/horizon-operator-controller-manager-9f4696d94-9pqks" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.710888 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-6d68dbc695-qg9vc"] Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.721625 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g75m9\" (UniqueName: \"kubernetes.io/projected/018c6179-6e65-461a-b457-a5eb949672de-kube-api-access-g75m9\") pod \"infra-operator-controller-manager-9d6c5db85-6bk6b\" (UID: \"018c6179-6e65-461a-b457-a5eb949672de\") " pod="openstack-operators/infra-operator-controller-manager-9d6c5db85-6bk6b" Oct 02 11:09:32 crc 
kubenswrapper[4783]: I1002 11:09:32.745537 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p5mnj\" (UniqueName: \"kubernetes.io/projected/1f8c1065-586c-4dce-a4ce-fc262d00063d-kube-api-access-p5mnj\") pod \"manila-operator-controller-manager-6d68dbc695-qg9vc\" (UID: \"1f8c1065-586c-4dce-a4ce-fc262d00063d\") " pod="openstack-operators/manila-operator-controller-manager-6d68dbc695-qg9vc" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.745588 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qml6x\" (UniqueName: \"kubernetes.io/projected/56a432ad-7000-45e3-ac88-9ebd3a1eb3a7-kube-api-access-qml6x\") pod \"mariadb-operator-controller-manager-88c7-w5766\" (UID: \"56a432ad-7000-45e3-ac88-9ebd3a1eb3a7\") " pod="openstack-operators/mariadb-operator-controller-manager-88c7-w5766" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.745611 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ss6j5\" (UniqueName: \"kubernetes.io/projected/82a47dd2-82ef-4fb9-9216-d14a2332683f-kube-api-access-ss6j5\") pod \"nova-operator-controller-manager-64cd67b5cb-kfxk9\" (UID: \"82a47dd2-82ef-4fb9-9216-d14a2332683f\") " pod="openstack-operators/nova-operator-controller-manager-64cd67b5cb-kfxk9" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.745659 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dwpvp\" (UniqueName: \"kubernetes.io/projected/a57ed846-184b-49fd-af9f-efc1fbba98e5-kube-api-access-dwpvp\") pod \"neutron-operator-controller-manager-849d5b9b84-6xwmm\" (UID: \"a57ed846-184b-49fd-af9f-efc1fbba98e5\") " pod="openstack-operators/neutron-operator-controller-manager-849d5b9b84-6xwmm" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.745681 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7pp2r\" (UniqueName: \"kubernetes.io/projected/6cf94630-4019-4863-80be-6e1088cf3407-kube-api-access-7pp2r\") pod \"keystone-operator-controller-manager-5bd55b4bff-qtqcj\" (UID: \"6cf94630-4019-4863-80be-6e1088cf3407\") " pod="openstack-operators/keystone-operator-controller-manager-5bd55b4bff-qtqcj" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.745717 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z55c9\" (UniqueName: \"kubernetes.io/projected/c788b295-31bf-496b-8b4f-fccc3ff3be17-kube-api-access-z55c9\") pod \"ironic-operator-controller-manager-5cd4858477-hs7gk\" (UID: \"c788b295-31bf-496b-8b4f-fccc3ff3be17\") " pod="openstack-operators/ironic-operator-controller-manager-5cd4858477-hs7gk" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.761670 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-7b787867f4-k7m89"] Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.762773 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-7b787867f4-k7m89" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.773513 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-64cd67b5cb-kfxk9"] Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.782686 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-8rvsh" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.789308 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p5mnj\" (UniqueName: \"kubernetes.io/projected/1f8c1065-586c-4dce-a4ce-fc262d00063d-kube-api-access-p5mnj\") pod \"manila-operator-controller-manager-6d68dbc695-qg9vc\" (UID: \"1f8c1065-586c-4dce-a4ce-fc262d00063d\") " pod="openstack-operators/manila-operator-controller-manager-6d68dbc695-qg9vc" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.804624 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-7b787867f4-k7m89"] Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.826003 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z55c9\" (UniqueName: \"kubernetes.io/projected/c788b295-31bf-496b-8b4f-fccc3ff3be17-kube-api-access-z55c9\") pod \"ironic-operator-controller-manager-5cd4858477-hs7gk\" (UID: \"c788b295-31bf-496b-8b4f-fccc3ff3be17\") " pod="openstack-operators/ironic-operator-controller-manager-5cd4858477-hs7gk" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.826099 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ss6j5\" (UniqueName: \"kubernetes.io/projected/82a47dd2-82ef-4fb9-9216-d14a2332683f-kube-api-access-ss6j5\") pod \"nova-operator-controller-manager-64cd67b5cb-kfxk9\" (UID: \"82a47dd2-82ef-4fb9-9216-d14a2332683f\") " pod="openstack-operators/nova-operator-controller-manager-64cd67b5cb-kfxk9" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.826923 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7pp2r\" (UniqueName: \"kubernetes.io/projected/6cf94630-4019-4863-80be-6e1088cf3407-kube-api-access-7pp2r\") pod \"keystone-operator-controller-manager-5bd55b4bff-qtqcj\" (UID: \"6cf94630-4019-4863-80be-6e1088cf3407\") " pod="openstack-operators/keystone-operator-controller-manager-5bd55b4bff-qtqcj" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.836511 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-589c58c6c-qkgtm"] Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.842151 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-589c58c6c-qkgtm" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.851707 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-2xzz8" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.865530 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-9976ff44c-xbh4h"] Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.866542 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-9976ff44c-xbh4h" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.867007 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-9f4696d94-9pqks" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.869353 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-s94fs" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.874356 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dwpvp\" (UniqueName: \"kubernetes.io/projected/a57ed846-184b-49fd-af9f-efc1fbba98e5-kube-api-access-dwpvp\") pod \"neutron-operator-controller-manager-849d5b9b84-6xwmm\" (UID: \"a57ed846-184b-49fd-af9f-efc1fbba98e5\") " pod="openstack-operators/neutron-operator-controller-manager-849d5b9b84-6xwmm" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.874475 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t9zs9\" (UniqueName: \"kubernetes.io/projected/139b1abc-ddcf-4ccc-83f4-deb58a682b0c-kube-api-access-t9zs9\") pod \"octavia-operator-controller-manager-7b787867f4-k7m89\" (UID: \"139b1abc-ddcf-4ccc-83f4-deb58a682b0c\") " pod="openstack-operators/octavia-operator-controller-manager-7b787867f4-k7m89" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.888528 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-5869cb545-4pwjh"] Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.889872 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5869cb545-4pwjh" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.900592 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-9976ff44c-xbh4h"] Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.915374 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-k62l7" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.915539 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.915741 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-589c58c6c-qkgtm"] Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.916096 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-84958c4d49-92cr8" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.933612 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qml6x\" (UniqueName: \"kubernetes.io/projected/56a432ad-7000-45e3-ac88-9ebd3a1eb3a7-kube-api-access-qml6x\") pod \"mariadb-operator-controller-manager-88c7-w5766\" (UID: \"56a432ad-7000-45e3-ac88-9ebd3a1eb3a7\") " pod="openstack-operators/mariadb-operator-controller-manager-88c7-w5766" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.935659 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dwpvp\" (UniqueName: \"kubernetes.io/projected/a57ed846-184b-49fd-af9f-efc1fbba98e5-kube-api-access-dwpvp\") pod \"neutron-operator-controller-manager-849d5b9b84-6xwmm\" (UID: \"a57ed846-184b-49fd-af9f-efc1fbba98e5\") " pod="openstack-operators/neutron-operator-controller-manager-849d5b9b84-6xwmm" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.941163 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-5869cb545-4pwjh"] Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.944558 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-5cd4858477-hs7gk" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.947282 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-5d889d78cf-pqhp6" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.960926 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-84d6b4b759-9fvns"] Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.961889 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-84d6b4b759-9fvns" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.967350 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-bzrld" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.970003 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-5bd55b4bff-qtqcj" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.976943 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4q2wk\" (UniqueName: \"kubernetes.io/projected/ebfbf228-ed71-4331-96be-8105d5029d2c-kube-api-access-4q2wk\") pod \"openstack-baremetal-operator-controller-manager-5869cb545-4pwjh\" (UID: \"ebfbf228-ed71-4331-96be-8105d5029d2c\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5869cb545-4pwjh" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.978394 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t9zs9\" (UniqueName: \"kubernetes.io/projected/139b1abc-ddcf-4ccc-83f4-deb58a682b0c-kube-api-access-t9zs9\") pod \"octavia-operator-controller-manager-7b787867f4-k7m89\" (UID: \"139b1abc-ddcf-4ccc-83f4-deb58a682b0c\") " pod="openstack-operators/octavia-operator-controller-manager-7b787867f4-k7m89" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.978493 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tfwss\" (UniqueName: \"kubernetes.io/projected/81c143d8-5f4b-4baf-9cb1-6f34110f4833-kube-api-access-tfwss\") pod \"ovn-operator-controller-manager-9976ff44c-xbh4h\" (UID: \"81c143d8-5f4b-4baf-9cb1-6f34110f4833\") " pod="openstack-operators/ovn-operator-controller-manager-9976ff44c-xbh4h" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.978635 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ebfbf228-ed71-4331-96be-8105d5029d2c-cert\") pod \"openstack-baremetal-operator-controller-manager-5869cb545-4pwjh\" (UID: \"ebfbf228-ed71-4331-96be-8105d5029d2c\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5869cb545-4pwjh" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.978717 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7kkt4\" (UniqueName: \"kubernetes.io/projected/7d7c0a51-805f-422d-bb62-75a53f9a80d0-kube-api-access-7kkt4\") pod \"placement-operator-controller-manager-589c58c6c-qkgtm\" (UID: \"7d7c0a51-805f-422d-bb62-75a53f9a80d0\") " pod="openstack-operators/placement-operator-controller-manager-589c58c6c-qkgtm" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.986027 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-b8d54b5d7-b5rx5"] Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.987059 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-b8d54b5d7-b5rx5" Oct 02 11:09:32 crc kubenswrapper[4783]: I1002 11:09:32.993585 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-j2tdc" Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.027192 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-88c7-w5766" Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.074628 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-6d68dbc695-qg9vc" Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.075123 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t9zs9\" (UniqueName: \"kubernetes.io/projected/139b1abc-ddcf-4ccc-83f4-deb58a682b0c-kube-api-access-t9zs9\") pod \"octavia-operator-controller-manager-7b787867f4-k7m89\" (UID: \"139b1abc-ddcf-4ccc-83f4-deb58a682b0c\") " pod="openstack-operators/octavia-operator-controller-manager-7b787867f4-k7m89" Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.080938 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7kkt4\" (UniqueName: \"kubernetes.io/projected/7d7c0a51-805f-422d-bb62-75a53f9a80d0-kube-api-access-7kkt4\") pod \"placement-operator-controller-manager-589c58c6c-qkgtm\" (UID: \"7d7c0a51-805f-422d-bb62-75a53f9a80d0\") " pod="openstack-operators/placement-operator-controller-manager-589c58c6c-qkgtm" Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.080994 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vqkvr\" (UniqueName: \"kubernetes.io/projected/96ca8cc4-c237-43b0-ae6b-7cc86a183f46-kube-api-access-vqkvr\") pod \"swift-operator-controller-manager-84d6b4b759-9fvns\" (UID: \"96ca8cc4-c237-43b0-ae6b-7cc86a183f46\") " pod="openstack-operators/swift-operator-controller-manager-84d6b4b759-9fvns" Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.081036 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4q2wk\" (UniqueName: \"kubernetes.io/projected/ebfbf228-ed71-4331-96be-8105d5029d2c-kube-api-access-4q2wk\") pod \"openstack-baremetal-operator-controller-manager-5869cb545-4pwjh\" (UID: \"ebfbf228-ed71-4331-96be-8105d5029d2c\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5869cb545-4pwjh" Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.081094 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tfwss\" (UniqueName: \"kubernetes.io/projected/81c143d8-5f4b-4baf-9cb1-6f34110f4833-kube-api-access-tfwss\") pod \"ovn-operator-controller-manager-9976ff44c-xbh4h\" (UID: \"81c143d8-5f4b-4baf-9cb1-6f34110f4833\") " pod="openstack-operators/ovn-operator-controller-manager-9976ff44c-xbh4h" Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.081134 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6h29c\" (UniqueName: \"kubernetes.io/projected/650ecd19-902d-48f8-bea4-2c7f120885dc-kube-api-access-6h29c\") pod \"telemetry-operator-controller-manager-b8d54b5d7-b5rx5\" (UID: \"650ecd19-902d-48f8-bea4-2c7f120885dc\") " pod="openstack-operators/telemetry-operator-controller-manager-b8d54b5d7-b5rx5" Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.081170 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ebfbf228-ed71-4331-96be-8105d5029d2c-cert\") pod \"openstack-baremetal-operator-controller-manager-5869cb545-4pwjh\" (UID: \"ebfbf228-ed71-4331-96be-8105d5029d2c\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5869cb545-4pwjh" Oct 02 11:09:33 crc kubenswrapper[4783]: E1002 11:09:33.081470 4783 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret 
"openstack-baremetal-operator-webhook-server-cert" not found Oct 02 11:09:33 crc kubenswrapper[4783]: E1002 11:09:33.081525 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ebfbf228-ed71-4331-96be-8105d5029d2c-cert podName:ebfbf228-ed71-4331-96be-8105d5029d2c nodeName:}" failed. No retries permitted until 2025-10-02 11:09:33.581510779 +0000 UTC m=+1006.897705040 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/ebfbf228-ed71-4331-96be-8105d5029d2c-cert") pod "openstack-baremetal-operator-controller-manager-5869cb545-4pwjh" (UID: "ebfbf228-ed71-4331-96be-8105d5029d2c") : secret "openstack-baremetal-operator-webhook-server-cert" not found Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.090040 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-b8d54b5d7-b5rx5"] Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.104312 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-64cd67b5cb-kfxk9" Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.124052 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-85777745bb-5b98j"] Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.127853 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-85777745bb-5b98j" Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.128134 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tfwss\" (UniqueName: \"kubernetes.io/projected/81c143d8-5f4b-4baf-9cb1-6f34110f4833-kube-api-access-tfwss\") pod \"ovn-operator-controller-manager-9976ff44c-xbh4h\" (UID: \"81c143d8-5f4b-4baf-9cb1-6f34110f4833\") " pod="openstack-operators/ovn-operator-controller-manager-9976ff44c-xbh4h" Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.146114 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7kkt4\" (UniqueName: \"kubernetes.io/projected/7d7c0a51-805f-422d-bb62-75a53f9a80d0-kube-api-access-7kkt4\") pod \"placement-operator-controller-manager-589c58c6c-qkgtm\" (UID: \"7d7c0a51-805f-422d-bb62-75a53f9a80d0\") " pod="openstack-operators/placement-operator-controller-manager-589c58c6c-qkgtm" Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.146658 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-849d5b9b84-6xwmm" Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.157218 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-84d6b4b759-9fvns"] Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.167117 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-k6bpw" Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.168650 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-85777745bb-5b98j"] Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.178949 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4q2wk\" (UniqueName: \"kubernetes.io/projected/ebfbf228-ed71-4331-96be-8105d5029d2c-kube-api-access-4q2wk\") pod \"openstack-baremetal-operator-controller-manager-5869cb545-4pwjh\" (UID: \"ebfbf228-ed71-4331-96be-8105d5029d2c\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5869cb545-4pwjh" Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.187984 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6h29c\" (UniqueName: \"kubernetes.io/projected/650ecd19-902d-48f8-bea4-2c7f120885dc-kube-api-access-6h29c\") pod \"telemetry-operator-controller-manager-b8d54b5d7-b5rx5\" (UID: \"650ecd19-902d-48f8-bea4-2c7f120885dc\") " pod="openstack-operators/telemetry-operator-controller-manager-b8d54b5d7-b5rx5" Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.188500 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vqkvr\" (UniqueName: \"kubernetes.io/projected/96ca8cc4-c237-43b0-ae6b-7cc86a183f46-kube-api-access-vqkvr\") pod \"swift-operator-controller-manager-84d6b4b759-9fvns\" (UID: \"96ca8cc4-c237-43b0-ae6b-7cc86a183f46\") " pod="openstack-operators/swift-operator-controller-manager-84d6b4b759-9fvns" Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.189287 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hqh8f\" (UniqueName: \"kubernetes.io/projected/0a24bee6-3dd0-47b3-881a-b5cff49d1e1e-kube-api-access-hqh8f\") pod \"test-operator-controller-manager-85777745bb-5b98j\" (UID: \"0a24bee6-3dd0-47b3-881a-b5cff49d1e1e\") " pod="openstack-operators/test-operator-controller-manager-85777745bb-5b98j" Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.189350 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/018c6179-6e65-461a-b457-a5eb949672de-cert\") pod \"infra-operator-controller-manager-9d6c5db85-6bk6b\" (UID: \"018c6179-6e65-461a-b457-a5eb949672de\") " pod="openstack-operators/infra-operator-controller-manager-9d6c5db85-6bk6b" Oct 02 11:09:33 crc kubenswrapper[4783]: E1002 11:09:33.189561 4783 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Oct 02 11:09:33 crc kubenswrapper[4783]: E1002 11:09:33.189600 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/018c6179-6e65-461a-b457-a5eb949672de-cert podName:018c6179-6e65-461a-b457-a5eb949672de nodeName:}" failed. 
No retries permitted until 2025-10-02 11:09:34.189587573 +0000 UTC m=+1007.505781834 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/018c6179-6e65-461a-b457-a5eb949672de-cert") pod "infra-operator-controller-manager-9d6c5db85-6bk6b" (UID: "018c6179-6e65-461a-b457-a5eb949672de") : secret "infra-operator-webhook-server-cert" not found Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.191118 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-6b9957f54f-pwg7t"] Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.197781 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-6b9957f54f-pwg7t" Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.202233 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-6b9957f54f-pwg7t"] Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.205078 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-nrlzg" Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.217348 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-7b787867f4-k7m89" Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.235217 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6h29c\" (UniqueName: \"kubernetes.io/projected/650ecd19-902d-48f8-bea4-2c7f120885dc-kube-api-access-6h29c\") pod \"telemetry-operator-controller-manager-b8d54b5d7-b5rx5\" (UID: \"650ecd19-902d-48f8-bea4-2c7f120885dc\") " pod="openstack-operators/telemetry-operator-controller-manager-b8d54b5d7-b5rx5" Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.256483 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-589c58c6c-qkgtm" Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.266918 4783 util.go:30] "No sandbox for pod can be found. 
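The nestedpendingoperations entries above show the kubelet backing off on the failed cert mounts: durationBeforeRetry 500ms on the first failure, 1s on the next. A minimal Go sketch of that doubling schedule; the log itself only shows the first two steps, so the ~2-minute cap used here is an assumption:

package main

import (
	"fmt"
	"time"
)

// Prints the retry schedule implied by the log entries above:
// 500ms, 1s, 2s, ... doubling until an assumed cap.
func main() {
	base := 500 * time.Millisecond
	maxDelay := 2*time.Minute + 2*time.Second // assumed cap, not shown in the log
	for d := base; ; d *= 2 {
		if d > maxDelay {
			d = maxDelay
		}
		fmt.Printf("durationBeforeRetry %v\n", d)
		if d == maxDelay {
			break
		}
	}
}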
Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-9976ff44c-xbh4h" Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.291162 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f9b7p\" (UniqueName: \"kubernetes.io/projected/d7665104-7dc0-4450-9d2a-85d514354c9e-kube-api-access-f9b7p\") pod \"watcher-operator-controller-manager-6b9957f54f-pwg7t\" (UID: \"d7665104-7dc0-4450-9d2a-85d514354c9e\") " pod="openstack-operators/watcher-operator-controller-manager-6b9957f54f-pwg7t" Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.291213 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hqh8f\" (UniqueName: \"kubernetes.io/projected/0a24bee6-3dd0-47b3-881a-b5cff49d1e1e-kube-api-access-hqh8f\") pod \"test-operator-controller-manager-85777745bb-5b98j\" (UID: \"0a24bee6-3dd0-47b3-881a-b5cff49d1e1e\") " pod="openstack-operators/test-operator-controller-manager-85777745bb-5b98j" Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.315541 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vqkvr\" (UniqueName: \"kubernetes.io/projected/96ca8cc4-c237-43b0-ae6b-7cc86a183f46-kube-api-access-vqkvr\") pod \"swift-operator-controller-manager-84d6b4b759-9fvns\" (UID: \"96ca8cc4-c237-43b0-ae6b-7cc86a183f46\") " pod="openstack-operators/swift-operator-controller-manager-84d6b4b759-9fvns" Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.346269 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-644bddb6d8-fkhwr" event={"ID":"ac5ac6e0-2521-4daa-8c0e-091f13b7a406","Type":"ContainerStarted","Data":"11eef2f76e3f49eaddaa1ff164bb79526da9bf6e0c56bec29c2a023f1bb35e60"} Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.353975 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-84d6b4b759-9fvns" Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.361568 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hqh8f\" (UniqueName: \"kubernetes.io/projected/0a24bee6-3dd0-47b3-881a-b5cff49d1e1e-kube-api-access-hqh8f\") pod \"test-operator-controller-manager-85777745bb-5b98j\" (UID: \"0a24bee6-3dd0-47b3-881a-b5cff49d1e1e\") " pod="openstack-operators/test-operator-controller-manager-85777745bb-5b98j" Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.368916 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-5f7d749dc7-gzmfg"] Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.370876 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-5f7d749dc7-gzmfg" Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.373326 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-sb79v" Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.373530 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.378609 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-b8d54b5d7-b5rx5" Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.392266 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f9b7p\" (UniqueName: \"kubernetes.io/projected/d7665104-7dc0-4450-9d2a-85d514354c9e-kube-api-access-f9b7p\") pod \"watcher-operator-controller-manager-6b9957f54f-pwg7t\" (UID: \"d7665104-7dc0-4450-9d2a-85d514354c9e\") " pod="openstack-operators/watcher-operator-controller-manager-6b9957f54f-pwg7t" Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.435849 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-5f7d749dc7-gzmfg"] Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.445095 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f9b7p\" (UniqueName: \"kubernetes.io/projected/d7665104-7dc0-4450-9d2a-85d514354c9e-kube-api-access-f9b7p\") pod \"watcher-operator-controller-manager-6b9957f54f-pwg7t\" (UID: \"d7665104-7dc0-4450-9d2a-85d514354c9e\") " pod="openstack-operators/watcher-operator-controller-manager-6b9957f54f-pwg7t" Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.488576 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-644bddb6d8-fkhwr"] Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.495728 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/test-operator-controller-manager-85777745bb-5b98j" Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.496486 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l68f2\" (UniqueName: \"kubernetes.io/projected/002e4392-6fb3-4354-86d1-8c6f1727e05c-kube-api-access-l68f2\") pod \"openstack-operator-controller-manager-5f7d749dc7-gzmfg\" (UID: \"002e4392-6fb3-4354-86d1-8c6f1727e05c\") " pod="openstack-operators/openstack-operator-controller-manager-5f7d749dc7-gzmfg" Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.496565 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/002e4392-6fb3-4354-86d1-8c6f1727e05c-cert\") pod \"openstack-operator-controller-manager-5f7d749dc7-gzmfg\" (UID: \"002e4392-6fb3-4354-86d1-8c6f1727e05c\") " pod="openstack-operators/openstack-operator-controller-manager-5f7d749dc7-gzmfg" Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.566311 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-6b9957f54f-pwg7t" Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.600344 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l68f2\" (UniqueName: \"kubernetes.io/projected/002e4392-6fb3-4354-86d1-8c6f1727e05c-kube-api-access-l68f2\") pod \"openstack-operator-controller-manager-5f7d749dc7-gzmfg\" (UID: \"002e4392-6fb3-4354-86d1-8c6f1727e05c\") " pod="openstack-operators/openstack-operator-controller-manager-5f7d749dc7-gzmfg" Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.600508 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/002e4392-6fb3-4354-86d1-8c6f1727e05c-cert\") pod \"openstack-operator-controller-manager-5f7d749dc7-gzmfg\" (UID: \"002e4392-6fb3-4354-86d1-8c6f1727e05c\") " pod="openstack-operators/openstack-operator-controller-manager-5f7d749dc7-gzmfg" Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.600536 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ebfbf228-ed71-4331-96be-8105d5029d2c-cert\") pod \"openstack-baremetal-operator-controller-manager-5869cb545-4pwjh\" (UID: \"ebfbf228-ed71-4331-96be-8105d5029d2c\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5869cb545-4pwjh" Oct 02 11:09:33 crc kubenswrapper[4783]: E1002 11:09:33.600649 4783 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Oct 02 11:09:33 crc kubenswrapper[4783]: E1002 11:09:33.600690 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/ebfbf228-ed71-4331-96be-8105d5029d2c-cert podName:ebfbf228-ed71-4331-96be-8105d5029d2c nodeName:}" failed. No retries permitted until 2025-10-02 11:09:34.600677959 +0000 UTC m=+1007.916872220 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/ebfbf228-ed71-4331-96be-8105d5029d2c-cert") pod "openstack-baremetal-operator-controller-manager-5869cb545-4pwjh" (UID: "ebfbf228-ed71-4331-96be-8105d5029d2c") : secret "openstack-baremetal-operator-webhook-server-cert" not found Oct 02 11:09:33 crc kubenswrapper[4783]: E1002 11:09:33.602328 4783 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Oct 02 11:09:33 crc kubenswrapper[4783]: E1002 11:09:33.602359 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/002e4392-6fb3-4354-86d1-8c6f1727e05c-cert podName:002e4392-6fb3-4354-86d1-8c6f1727e05c nodeName:}" failed. No retries permitted until 2025-10-02 11:09:34.102351324 +0000 UTC m=+1007.418545575 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/002e4392-6fb3-4354-86d1-8c6f1727e05c-cert") pod "openstack-operator-controller-manager-5f7d749dc7-gzmfg" (UID: "002e4392-6fb3-4354-86d1-8c6f1727e05c") : secret "webhook-server-cert" not found Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.602378 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-xgkcf"] Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.618722 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-xgkcf"] Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.618859 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-xgkcf" Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.623002 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-bqnmt" Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.695502 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l68f2\" (UniqueName: \"kubernetes.io/projected/002e4392-6fb3-4354-86d1-8c6f1727e05c-kube-api-access-l68f2\") pod \"openstack-operator-controller-manager-5f7d749dc7-gzmfg\" (UID: \"002e4392-6fb3-4354-86d1-8c6f1727e05c\") " pod="openstack-operators/openstack-operator-controller-manager-5f7d749dc7-gzmfg" Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.710631 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5kgfw\" (UniqueName: \"kubernetes.io/projected/90aa3047-331d-471b-b009-9be03d87b3ed-kube-api-access-5kgfw\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-xgkcf\" (UID: \"90aa3047-331d-471b-b009-9be03d87b3ed\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-xgkcf" Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.787102 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-6ff8b75857-x7698"] Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.812016 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5kgfw\" (UniqueName: \"kubernetes.io/projected/90aa3047-331d-471b-b009-9be03d87b3ed-kube-api-access-5kgfw\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-xgkcf\" (UID: \"90aa3047-331d-471b-b009-9be03d87b3ed\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-xgkcf" Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.822938 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-9f4696d94-9pqks"] Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.840827 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5kgfw\" (UniqueName: \"kubernetes.io/projected/90aa3047-331d-471b-b009-9be03d87b3ed-kube-api-access-5kgfw\") pod \"rabbitmq-cluster-operator-manager-5f97d8c699-xgkcf\" (UID: \"90aa3047-331d-471b-b009-9be03d87b3ed\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-xgkcf" Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.861145 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-5d889d78cf-pqhp6"] Oct 02 11:09:33 crc 
kubenswrapper[4783]: W1002 11:09:33.931495 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod87d34dfd_25c7_4f4c_bbda_058e38a01994.slice/crio-32c31252bdc28002373dfee3239b4515f15902dbc04f9ef12ec25d7128923e8a WatchSource:0}: Error finding container 32c31252bdc28002373dfee3239b4515f15902dbc04f9ef12ec25d7128923e8a: Status 404 returned error can't find the container with id 32c31252bdc28002373dfee3239b4515f15902dbc04f9ef12ec25d7128923e8a Oct 02 11:09:33 crc kubenswrapper[4783]: W1002 11:09:33.941529 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podae698aff_8888_48b6_9c37_cbcea6e7bc6e.slice/crio-82e69e1679f154abec84445725eaddbdf03828e3c5c3f7c96be26074eb3ceb16 WatchSource:0}: Error finding container 82e69e1679f154abec84445725eaddbdf03828e3c5c3f7c96be26074eb3ceb16: Status 404 returned error can't find the container with id 82e69e1679f154abec84445725eaddbdf03828e3c5c3f7c96be26074eb3ceb16 Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.953895 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-xgkcf" Oct 02 11:09:33 crc kubenswrapper[4783]: I1002 11:09:33.954878 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-84f4f7b77b-wt52p"] Oct 02 11:09:33 crc kubenswrapper[4783]: W1002 11:09:33.969373 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod24d5bc39_4bbb_47af_94e2_222118ccdabb.slice/crio-8d176f3c65ae7e42a959ef7f60c31c723e96b60f8e982d5993893b6bc8faec4c WatchSource:0}: Error finding container 8d176f3c65ae7e42a959ef7f60c31c723e96b60f8e982d5993893b6bc8faec4c: Status 404 returned error can't find the container with id 8d176f3c65ae7e42a959ef7f60c31c723e96b60f8e982d5993893b6bc8faec4c Oct 02 11:09:34 crc kubenswrapper[4783]: I1002 11:09:34.053611 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-84958c4d49-92cr8"] Oct 02 11:09:34 crc kubenswrapper[4783]: W1002 11:09:34.115672 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8041f454_2294_478d_b4cd_ffa769b8f709.slice/crio-16657fd4626a490d9a6f7ca552f7bca9bb65de29e746160fe816cfc77a085528 WatchSource:0}: Error finding container 16657fd4626a490d9a6f7ca552f7bca9bb65de29e746160fe816cfc77a085528: Status 404 returned error can't find the container with id 16657fd4626a490d9a6f7ca552f7bca9bb65de29e746160fe816cfc77a085528 Oct 02 11:09:34 crc kubenswrapper[4783]: I1002 11:09:34.116298 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/002e4392-6fb3-4354-86d1-8c6f1727e05c-cert\") pod \"openstack-operator-controller-manager-5f7d749dc7-gzmfg\" (UID: \"002e4392-6fb3-4354-86d1-8c6f1727e05c\") " pod="openstack-operators/openstack-operator-controller-manager-5f7d749dc7-gzmfg" Oct 02 11:09:34 crc kubenswrapper[4783]: E1002 11:09:34.116442 4783 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Oct 02 11:09:34 crc kubenswrapper[4783]: E1002 11:09:34.116489 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/002e4392-6fb3-4354-86d1-8c6f1727e05c-cert 
podName:002e4392-6fb3-4354-86d1-8c6f1727e05c nodeName:}" failed. No retries permitted until 2025-10-02 11:09:35.116475615 +0000 UTC m=+1008.432669876 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/002e4392-6fb3-4354-86d1-8c6f1727e05c-cert") pod "openstack-operator-controller-manager-5f7d749dc7-gzmfg" (UID: "002e4392-6fb3-4354-86d1-8c6f1727e05c") : secret "webhook-server-cert" not found Oct 02 11:09:34 crc kubenswrapper[4783]: I1002 11:09:34.217265 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/018c6179-6e65-461a-b457-a5eb949672de-cert\") pod \"infra-operator-controller-manager-9d6c5db85-6bk6b\" (UID: \"018c6179-6e65-461a-b457-a5eb949672de\") " pod="openstack-operators/infra-operator-controller-manager-9d6c5db85-6bk6b" Oct 02 11:09:34 crc kubenswrapper[4783]: I1002 11:09:34.236286 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/018c6179-6e65-461a-b457-a5eb949672de-cert\") pod \"infra-operator-controller-manager-9d6c5db85-6bk6b\" (UID: \"018c6179-6e65-461a-b457-a5eb949672de\") " pod="openstack-operators/infra-operator-controller-manager-9d6c5db85-6bk6b" Oct 02 11:09:34 crc kubenswrapper[4783]: I1002 11:09:34.362588 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-6ff8b75857-x7698" event={"ID":"87d34dfd-25c7-4f4c-bbda-058e38a01994","Type":"ContainerStarted","Data":"32c31252bdc28002373dfee3239b4515f15902dbc04f9ef12ec25d7128923e8a"} Oct 02 11:09:34 crc kubenswrapper[4783]: I1002 11:09:34.366108 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-88c7-w5766"] Oct 02 11:09:34 crc kubenswrapper[4783]: I1002 11:09:34.367041 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-84f4f7b77b-wt52p" event={"ID":"52d93c12-b942-4ad3-935a-b555026711ea","Type":"ContainerStarted","Data":"d7090452d50d6e215f3db2a4dbeb35de1de5e4bcd6af80eea5b163a272dbda83"} Oct 02 11:09:34 crc kubenswrapper[4783]: I1002 11:09:34.373394 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5d889d78cf-pqhp6" event={"ID":"24d5bc39-4bbb-47af-94e2-222118ccdabb","Type":"ContainerStarted","Data":"8d176f3c65ae7e42a959ef7f60c31c723e96b60f8e982d5993893b6bc8faec4c"} Oct 02 11:09:34 crc kubenswrapper[4783]: I1002 11:09:34.374864 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-9f4696d94-9pqks" event={"ID":"ae698aff-8888-48b6-9c37-cbcea6e7bc6e","Type":"ContainerStarted","Data":"82e69e1679f154abec84445725eaddbdf03828e3c5c3f7c96be26074eb3ceb16"} Oct 02 11:09:34 crc kubenswrapper[4783]: I1002 11:09:34.376105 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-84958c4d49-92cr8" event={"ID":"8041f454-2294-478d-b4cd-ffa769b8f709","Type":"ContainerStarted","Data":"16657fd4626a490d9a6f7ca552f7bca9bb65de29e746160fe816cfc77a085528"} Oct 02 11:09:34 crc kubenswrapper[4783]: I1002 11:09:34.377916 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-5bd55b4bff-qtqcj"] Oct 02 11:09:34 crc kubenswrapper[4783]: I1002 11:09:34.428081 4783 util.go:30] "No sandbox for pod can be found. 
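From here on the log is dominated by "SyncLoop (PLEG)" ContainerStarted events as the operator sandboxes come up. A hypothetical triage helper for pulling the pod name, event type, and container ID out of these entries so they can be correlated with the earlier sandbox and mount messages (regex shaped after the lines above):

package main

import (
	"bufio"
	"fmt"
	"os"
	"regexp"
)

// Matches: "SyncLoop (PLEG): event for pod" pod="ns/name" event={"ID":"...","Type":"...","Data":"..."}
var pleg = regexp.MustCompile(`"SyncLoop \(PLEG\): event for pod" pod="([^"]+)" event=\{"ID":"([^"]+)","Type":"([^"]+)","Data":"([^"]+)"\}`)

func main() {
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 1024*1024), 1024*1024) // fused log lines can be very long
	for sc.Scan() {
		// FindAll, because un-reflowed lines may carry several entries each.
		for _, m := range pleg.FindAllStringSubmatch(sc.Text(), -1) {
			fmt.Printf("pod=%s type=%s uid=%s container=%.12s...\n", m[1], m[3], m[2], m[4])
		}
	}
}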
Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-9d6c5db85-6bk6b" Oct 02 11:09:34 crc kubenswrapper[4783]: I1002 11:09:34.622629 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ebfbf228-ed71-4331-96be-8105d5029d2c-cert\") pod \"openstack-baremetal-operator-controller-manager-5869cb545-4pwjh\" (UID: \"ebfbf228-ed71-4331-96be-8105d5029d2c\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5869cb545-4pwjh" Oct 02 11:09:34 crc kubenswrapper[4783]: I1002 11:09:34.629530 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/ebfbf228-ed71-4331-96be-8105d5029d2c-cert\") pod \"openstack-baremetal-operator-controller-manager-5869cb545-4pwjh\" (UID: \"ebfbf228-ed71-4331-96be-8105d5029d2c\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-5869cb545-4pwjh" Oct 02 11:09:34 crc kubenswrapper[4783]: I1002 11:09:34.656526 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-849d5b9b84-6xwmm"] Oct 02 11:09:34 crc kubenswrapper[4783]: I1002 11:09:34.719497 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-5cd4858477-hs7gk"] Oct 02 11:09:34 crc kubenswrapper[4783]: I1002 11:09:34.746850 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-6d68dbc695-qg9vc"] Oct 02 11:09:34 crc kubenswrapper[4783]: I1002 11:09:34.828684 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5869cb545-4pwjh" Oct 02 11:09:34 crc kubenswrapper[4783]: I1002 11:09:34.966168 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-xgkcf"] Oct 02 11:09:34 crc kubenswrapper[4783]: I1002 11:09:34.991478 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-85777745bb-5b98j"] Oct 02 11:09:35 crc kubenswrapper[4783]: I1002 11:09:35.000517 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-589c58c6c-qkgtm"] Oct 02 11:09:35 crc kubenswrapper[4783]: I1002 11:09:35.007149 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-84d6b4b759-9fvns"] Oct 02 11:09:35 crc kubenswrapper[4783]: I1002 11:09:35.016272 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-b8d54b5d7-b5rx5"] Oct 02 11:09:35 crc kubenswrapper[4783]: W1002 11:09:35.052358 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod96ca8cc4_c237_43b0_ae6b_7cc86a183f46.slice/crio-5ba5a0ad0ce05953c971a78a6e8ceaea05ac5ead29bfff06ab85b1af926cf1f2 WatchSource:0}: Error finding container 5ba5a0ad0ce05953c971a78a6e8ceaea05ac5ead29bfff06ab85b1af926cf1f2: Status 404 returned error can't find the container with id 5ba5a0ad0ce05953c971a78a6e8ceaea05ac5ead29bfff06ab85b1af926cf1f2 Oct 02 11:09:35 crc kubenswrapper[4783]: W1002 11:09:35.053461 4783 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod650ecd19_902d_48f8_bea4_2c7f120885dc.slice/crio-a38cde5d1d2dc3a20ad55c93bc5c0632a820e1261db561cea3f0afb191ff12bb WatchSource:0}: Error finding container a38cde5d1d2dc3a20ad55c93bc5c0632a820e1261db561cea3f0afb191ff12bb: Status 404 returned error can't find the container with id a38cde5d1d2dc3a20ad55c93bc5c0632a820e1261db561cea3f0afb191ff12bb Oct 02 11:09:35 crc kubenswrapper[4783]: W1002 11:09:35.094549 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7d7c0a51_805f_422d_bb62_75a53f9a80d0.slice/crio-58a08d6427351e71ab61bf305b57c8db1d80e6acee78f8f8c445751806544bfd WatchSource:0}: Error finding container 58a08d6427351e71ab61bf305b57c8db1d80e6acee78f8f8c445751806544bfd: Status 404 returned error can't find the container with id 58a08d6427351e71ab61bf305b57c8db1d80e6acee78f8f8c445751806544bfd Oct 02 11:09:35 crc kubenswrapper[4783]: I1002 11:09:35.133743 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-9d6c5db85-6bk6b"] Oct 02 11:09:35 crc kubenswrapper[4783]: I1002 11:09:35.155601 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/002e4392-6fb3-4354-86d1-8c6f1727e05c-cert\") pod \"openstack-operator-controller-manager-5f7d749dc7-gzmfg\" (UID: \"002e4392-6fb3-4354-86d1-8c6f1727e05c\") " pod="openstack-operators/openstack-operator-controller-manager-5f7d749dc7-gzmfg" Oct 02 11:09:35 crc kubenswrapper[4783]: I1002 11:09:35.156803 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-64cd67b5cb-kfxk9"] Oct 02 11:09:35 crc kubenswrapper[4783]: I1002 11:09:35.164774 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/002e4392-6fb3-4354-86d1-8c6f1727e05c-cert\") pod \"openstack-operator-controller-manager-5f7d749dc7-gzmfg\" (UID: \"002e4392-6fb3-4354-86d1-8c6f1727e05c\") " pod="openstack-operators/openstack-operator-controller-manager-5f7d749dc7-gzmfg" Oct 02 11:09:35 crc kubenswrapper[4783]: I1002 11:09:35.168943 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-9976ff44c-xbh4h"] Oct 02 11:09:35 crc kubenswrapper[4783]: I1002 11:09:35.213933 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-7b787867f4-k7m89"] Oct 02 11:09:35 crc kubenswrapper[4783]: E1002 11:09:35.221653 4783 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:a517abc6427ab73fed93b0bd89a6eb52d0311fbfb0c00752f889baf8ffd5068f,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-ss6j5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-64cd67b5cb-kfxk9_openstack-operators(82a47dd2-82ef-4fb9-9216-d14a2332683f): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Oct 02 11:09:35 crc kubenswrapper[4783]: I1002 11:09:35.225801 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-6b9957f54f-pwg7t"] Oct 02 11:09:35 crc kubenswrapper[4783]: I1002 11:09:35.243149 4783 util.go:30] "No sandbox for pod can be found. 
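"ErrImagePull: pull QPS exceeded" in the dump above is not a registry failure: with roughly twenty operator pods starting at once, the kubelet's own registry pull rate limiter rejects the overflow locally and the pulls are retried. A minimal sketch of that behaviour using client-go's token bucket; the 5 QPS / burst 10 figures are the usual kubelet defaults (registryPullQPS/registryBurst) and are assumptions here:

package main

import (
	"fmt"

	"k8s.io/client-go/util/flowcontrol"
)

// Simulates a burst of simultaneous image pulls against the kubelet's limiter.
func main() {
	limiter := flowcontrol.NewTokenBucketRateLimiter(5, 10) // assumed defaults
	for i := 1; i <= 15; i++ {
		if limiter.TryAccept() {
			fmt.Printf("pull %d: accepted\n", i)
		} else {
			// This is the condition surfaced above as ErrImagePull.
			fmt.Printf("pull %d: pull QPS exceeded\n", i)
		}
	}
}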
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-5f7d749dc7-gzmfg" Oct 02 11:09:35 crc kubenswrapper[4783]: W1002 11:09:35.277264 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod139b1abc_ddcf_4ccc_83f4_deb58a682b0c.slice/crio-a01882797c3dc9d23df6cf11823ccbd9896875a16f37380d2dc2a83b8906204f WatchSource:0}: Error finding container a01882797c3dc9d23df6cf11823ccbd9896875a16f37380d2dc2a83b8906204f: Status 404 returned error can't find the container with id a01882797c3dc9d23df6cf11823ccbd9896875a16f37380d2dc2a83b8906204f Oct 02 11:09:35 crc kubenswrapper[4783]: E1002 11:09:35.280984 4783 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:09c2f519ea218f6038b7be039b8e6ac33ee93b217b9be0d2d18a5e7f94faae06,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-f9b7p,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-6b9957f54f-pwg7t_openstack-operators(d7665104-7dc0-4450-9d2a-85d514354c9e): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Oct 02 11:09:35 crc kubenswrapper[4783]: E1002 11:09:35.288316 4783 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/infra-operator@sha256:3f96f0843934236c261db73dacb50fc12a288890562ee4ebdc9ec22360937cd3,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 
--leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{600 -3} {} 600m DecimalSI},memory: {{2147483648 0} {} 2Gi BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{536870912 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cert,ReadOnly:true,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-g75m9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod infra-operator-controller-manager-9d6c5db85-6bk6b_openstack-operators(018c6179-6e65-461a-b457-a5eb949672de): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Oct 02 11:09:35 crc kubenswrapper[4783]: I1002 11:09:35.384132 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5cd4858477-hs7gk" event={"ID":"c788b295-31bf-496b-8b4f-fccc3ff3be17","Type":"ContainerStarted","Data":"176fa2d79c568224cdea0404a3ca2c2fa9cf004fc3e612ae7cdac9882b956d6f"} Oct 02 11:09:35 crc kubenswrapper[4783]: I1002 11:09:35.384960 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-84d6b4b759-9fvns" event={"ID":"96ca8cc4-c237-43b0-ae6b-7cc86a183f46","Type":"ContainerStarted","Data":"5ba5a0ad0ce05953c971a78a6e8ceaea05ac5ead29bfff06ab85b1af926cf1f2"} Oct 02 11:09:35 crc kubenswrapper[4783]: I1002 11:09:35.401653 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-85777745bb-5b98j" event={"ID":"0a24bee6-3dd0-47b3-881a-b5cff49d1e1e","Type":"ContainerStarted","Data":"786e810be0c392847145c097a317fc10e1bc7155b872d2c89285f18e48e51084"} Oct 02 11:09:35 crc kubenswrapper[4783]: I1002 11:09:35.416504 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-589c58c6c-qkgtm" 
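The "Failed to process watch event ... Status 404" warnings scattered through this stretch are a benign race: the cgroup watch fires for a new crio-&lt;id&gt; slice before the container is registered with cAdvisor. The slice name embeds the pod UID with '-' mapped to '_'; a small helper to map it back when correlating these warnings with pods (example path copied from the log above):

package main

import (
	"fmt"
	"regexp"
	"strings"
)

var slice = regexp.MustCompile(`kubepods-burstable-pod([0-9a-f_]+)\.slice`)

// podUIDFromSlice recovers the pod UID encoded in a kubepods cgroup path.
func podUIDFromSlice(path string) string {
	m := slice.FindStringSubmatch(path)
	if m == nil {
		return ""
	}
	return strings.ReplaceAll(m[1], "_", "-")
}

func main() {
	p := "/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod96ca8cc4_c237_43b0_ae6b_7cc86a183f46.slice/crio-5ba5a0ad0ce05953c971a78a6e8ceaea05ac5ead29bfff06ab85b1af926cf1f2"
	fmt.Println(podUIDFromSlice(p)) // 96ca8cc4-c237-43b0-ae6b-7cc86a183f46 (the swift-operator pod)
}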
event={"ID":"7d7c0a51-805f-422d-bb62-75a53f9a80d0","Type":"ContainerStarted","Data":"58a08d6427351e71ab61bf305b57c8db1d80e6acee78f8f8c445751806544bfd"} Oct 02 11:09:35 crc kubenswrapper[4783]: I1002 11:09:35.418855 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-b8d54b5d7-b5rx5" event={"ID":"650ecd19-902d-48f8-bea4-2c7f120885dc","Type":"ContainerStarted","Data":"a38cde5d1d2dc3a20ad55c93bc5c0632a820e1261db561cea3f0afb191ff12bb"} Oct 02 11:09:35 crc kubenswrapper[4783]: I1002 11:09:35.420444 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-88c7-w5766" event={"ID":"56a432ad-7000-45e3-ac88-9ebd3a1eb3a7","Type":"ContainerStarted","Data":"a9f5708c93148ecb93166fd2e59df2b0693cf055dec64858b4172a29d46f9d7f"} Oct 02 11:09:35 crc kubenswrapper[4783]: I1002 11:09:35.422045 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-6b9957f54f-pwg7t" event={"ID":"d7665104-7dc0-4450-9d2a-85d514354c9e","Type":"ContainerStarted","Data":"ae397397232d089235fb28399c5a9de3c6b9856810a1711bfdcb0382f9eacd92"} Oct 02 11:09:35 crc kubenswrapper[4783]: I1002 11:09:35.424010 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-9976ff44c-xbh4h" event={"ID":"81c143d8-5f4b-4baf-9cb1-6f34110f4833","Type":"ContainerStarted","Data":"7a7457dd78fb0b4df60d09ba8a26c9bc91e98f0b4d0c550ab8222d46d0cfea63"} Oct 02 11:09:35 crc kubenswrapper[4783]: I1002 11:09:35.425737 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-6d68dbc695-qg9vc" event={"ID":"1f8c1065-586c-4dce-a4ce-fc262d00063d","Type":"ContainerStarted","Data":"70fb5facda2e0d3e8362ca952f33f47f30bec0f5f9a090de868f16ac5504d5cb"} Oct 02 11:09:35 crc kubenswrapper[4783]: I1002 11:09:35.433938 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-7b787867f4-k7m89" event={"ID":"139b1abc-ddcf-4ccc-83f4-deb58a682b0c","Type":"ContainerStarted","Data":"a01882797c3dc9d23df6cf11823ccbd9896875a16f37380d2dc2a83b8906204f"} Oct 02 11:09:35 crc kubenswrapper[4783]: I1002 11:09:35.446137 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-xgkcf" event={"ID":"90aa3047-331d-471b-b009-9be03d87b3ed","Type":"ContainerStarted","Data":"e81b12caced4298059eabfd079d0a1c0c88fc7593908884d94ef831b140dab7f"} Oct 02 11:09:35 crc kubenswrapper[4783]: I1002 11:09:35.447972 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-849d5b9b84-6xwmm" event={"ID":"a57ed846-184b-49fd-af9f-efc1fbba98e5","Type":"ContainerStarted","Data":"a33f4539982403b557145b70aebe92d351f5f66c510842da0306e88240ca2ac7"} Oct 02 11:09:35 crc kubenswrapper[4783]: I1002 11:09:35.453085 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-9d6c5db85-6bk6b" event={"ID":"018c6179-6e65-461a-b457-a5eb949672de","Type":"ContainerStarted","Data":"222aaeb43bf80af06b5f90a7fb5e9e47c0ea186a2b87ed5349df65f0cd049346"} Oct 02 11:09:35 crc kubenswrapper[4783]: I1002 11:09:35.463073 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-5bd55b4bff-qtqcj" 
event={"ID":"6cf94630-4019-4863-80be-6e1088cf3407","Type":"ContainerStarted","Data":"035a785d060fd8ceab824f165549ec304b34534a2f3a9b80439bc35f46c34df7"} Oct 02 11:09:35 crc kubenswrapper[4783]: I1002 11:09:35.477374 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-64cd67b5cb-kfxk9" event={"ID":"82a47dd2-82ef-4fb9-9216-d14a2332683f","Type":"ContainerStarted","Data":"77739a8ca2c7a3ef4fe483fc00ffd1103987462654defc4b1b5fa0131dba25ad"} Oct 02 11:09:35 crc kubenswrapper[4783]: I1002 11:09:35.599749 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-5869cb545-4pwjh"] Oct 02 11:09:35 crc kubenswrapper[4783]: W1002 11:09:35.616670 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podebfbf228_ed71_4331_96be_8105d5029d2c.slice/crio-3012090a29cac963c819afd02ef99730b9d216de6789995418061fc0b5d0d318 WatchSource:0}: Error finding container 3012090a29cac963c819afd02ef99730b9d216de6789995418061fc0b5d0d318: Status 404 returned error can't find the container with id 3012090a29cac963c819afd02ef99730b9d216de6789995418061fc0b5d0d318 Oct 02 11:09:35 crc kubenswrapper[4783]: E1002 11:09:35.694981 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/nova-operator-controller-manager-64cd67b5cb-kfxk9" podUID="82a47dd2-82ef-4fb9-9216-d14a2332683f" Oct 02 11:09:35 crc kubenswrapper[4783]: I1002 11:09:35.817395 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-5f7d749dc7-gzmfg"] Oct 02 11:09:35 crc kubenswrapper[4783]: W1002 11:09:35.854980 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod002e4392_6fb3_4354_86d1_8c6f1727e05c.slice/crio-4904d9c6cc8f9532ab8ba40dba21d43b309f2368c0d693f4f57d323da531c52e WatchSource:0}: Error finding container 4904d9c6cc8f9532ab8ba40dba21d43b309f2368c0d693f4f57d323da531c52e: Status 404 returned error can't find the container with id 4904d9c6cc8f9532ab8ba40dba21d43b309f2368c0d693f4f57d323da531c52e Oct 02 11:09:35 crc kubenswrapper[4783]: E1002 11:09:35.858916 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/infra-operator-controller-manager-9d6c5db85-6bk6b" podUID="018c6179-6e65-461a-b457-a5eb949672de" Oct 02 11:09:35 crc kubenswrapper[4783]: E1002 11:09:35.874573 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/watcher-operator-controller-manager-6b9957f54f-pwg7t" podUID="d7665104-7dc0-4450-9d2a-85d514354c9e" Oct 02 11:09:36 crc kubenswrapper[4783]: I1002 11:09:36.520616 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5869cb545-4pwjh" event={"ID":"ebfbf228-ed71-4331-96be-8105d5029d2c","Type":"ContainerStarted","Data":"3012090a29cac963c819afd02ef99730b9d216de6789995418061fc0b5d0d318"} Oct 02 11:09:36 crc kubenswrapper[4783]: I1002 11:09:36.532908 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-6b9957f54f-pwg7t" 
event={"ID":"d7665104-7dc0-4450-9d2a-85d514354c9e","Type":"ContainerStarted","Data":"aca653fb9cb3c917ec803214fe695e2b53b853e1ffd976c9ccf4972437887e68"} Oct 02 11:09:36 crc kubenswrapper[4783]: E1002 11:09:36.543081 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:09c2f519ea218f6038b7be039b8e6ac33ee93b217b9be0d2d18a5e7f94faae06\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-6b9957f54f-pwg7t" podUID="d7665104-7dc0-4450-9d2a-85d514354c9e" Oct 02 11:09:36 crc kubenswrapper[4783]: I1002 11:09:36.544297 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-9d6c5db85-6bk6b" event={"ID":"018c6179-6e65-461a-b457-a5eb949672de","Type":"ContainerStarted","Data":"5c6238f892a8022520a1dda0bb27173c40a3a552e3b46f3b1bddc358759e5d25"} Oct 02 11:09:36 crc kubenswrapper[4783]: E1002 11:09:36.547714 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/infra-operator@sha256:3f96f0843934236c261db73dacb50fc12a288890562ee4ebdc9ec22360937cd3\\\"\"" pod="openstack-operators/infra-operator-controller-manager-9d6c5db85-6bk6b" podUID="018c6179-6e65-461a-b457-a5eb949672de" Oct 02 11:09:36 crc kubenswrapper[4783]: I1002 11:09:36.570679 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-64cd67b5cb-kfxk9" event={"ID":"82a47dd2-82ef-4fb9-9216-d14a2332683f","Type":"ContainerStarted","Data":"fe41fb61d94c7c6dff606acfbf7f1167d985a7f210b0b976b27311d1ef08b98a"} Oct 02 11:09:36 crc kubenswrapper[4783]: E1002 11:09:36.577760 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:a517abc6427ab73fed93b0bd89a6eb52d0311fbfb0c00752f889baf8ffd5068f\\\"\"" pod="openstack-operators/nova-operator-controller-manager-64cd67b5cb-kfxk9" podUID="82a47dd2-82ef-4fb9-9216-d14a2332683f" Oct 02 11:09:36 crc kubenswrapper[4783]: I1002 11:09:36.597269 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-5f7d749dc7-gzmfg" event={"ID":"002e4392-6fb3-4354-86d1-8c6f1727e05c","Type":"ContainerStarted","Data":"cfabf42201b0134eb0128b8c071d4fd90c40767c10979e2aeec6c97b537aa867"} Oct 02 11:09:36 crc kubenswrapper[4783]: I1002 11:09:36.597316 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-5f7d749dc7-gzmfg" event={"ID":"002e4392-6fb3-4354-86d1-8c6f1727e05c","Type":"ContainerStarted","Data":"4904d9c6cc8f9532ab8ba40dba21d43b309f2368c0d693f4f57d323da531c52e"} Oct 02 11:09:36 crc kubenswrapper[4783]: I1002 11:09:36.597994 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-5f7d749dc7-gzmfg" Oct 02 11:09:36 crc kubenswrapper[4783]: I1002 11:09:36.848529 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-5f7d749dc7-gzmfg" podStartSLOduration=3.848507161 podStartE2EDuration="3.848507161s" podCreationTimestamp="2025-10-02 11:09:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 
UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:09:36.831850897 +0000 UTC m=+1010.148045158" watchObservedRunningTime="2025-10-02 11:09:36.848507161 +0000 UTC m=+1010.164701422" Oct 02 11:09:37 crc kubenswrapper[4783]: I1002 11:09:37.632611 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-5f7d749dc7-gzmfg" event={"ID":"002e4392-6fb3-4354-86d1-8c6f1727e05c","Type":"ContainerStarted","Data":"0824eeb072bc28050b7d68c434c77b0176c17ed261abdcb44d2ca3638a07c4ce"} Oct 02 11:09:37 crc kubenswrapper[4783]: E1002 11:09:37.634634 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/infra-operator@sha256:3f96f0843934236c261db73dacb50fc12a288890562ee4ebdc9ec22360937cd3\\\"\"" pod="openstack-operators/infra-operator-controller-manager-9d6c5db85-6bk6b" podUID="018c6179-6e65-461a-b457-a5eb949672de" Oct 02 11:09:37 crc kubenswrapper[4783]: E1002 11:09:37.634674 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:a517abc6427ab73fed93b0bd89a6eb52d0311fbfb0c00752f889baf8ffd5068f\\\"\"" pod="openstack-operators/nova-operator-controller-manager-64cd67b5cb-kfxk9" podUID="82a47dd2-82ef-4fb9-9216-d14a2332683f" Oct 02 11:09:37 crc kubenswrapper[4783]: E1002 11:09:37.637448 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:09c2f519ea218f6038b7be039b8e6ac33ee93b217b9be0d2d18a5e7f94faae06\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-6b9957f54f-pwg7t" podUID="d7665104-7dc0-4450-9d2a-85d514354c9e" Oct 02 11:09:45 crc kubenswrapper[4783]: I1002 11:09:45.249768 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-5f7d749dc7-gzmfg" Oct 02 11:09:49 crc kubenswrapper[4783]: E1002 11:09:49.628335 4783 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/barbican-operator@sha256:bb39758cc8cd0d2cd02841dc81b53fd88647e2db15ee16cdd8c44d4098a942fd" Oct 02 11:09:49 crc kubenswrapper[4783]: E1002 11:09:49.629157 4783 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/barbican-operator@sha256:bb39758cc8cd0d2cd02841dc81b53fd88647e2db15ee16cdd8c44d4098a942fd,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-w4mth,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-operator-controller-manager-6ff8b75857-x7698_openstack-operators(87d34dfd-25c7-4f4c-bbda-058e38a01994): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Oct 02 11:09:50 crc kubenswrapper[4783]: E1002 11:09:50.071723 4783 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/glance-operator@sha256:21792a2317c0a55e40b2a02a7d5d4682b76538ed2a2e0633199aa395e60ecc72" Oct 02 11:09:50 crc kubenswrapper[4783]: E1002 11:09:50.071979 4783 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/glance-operator@sha256:21792a2317c0a55e40b2a02a7d5d4682b76538ed2a2e0633199aa395e60ecc72,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-2g7sd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-operator-controller-manager-84958c4d49-92cr8_openstack-operators(8041f454-2294-478d-b4cd-ffa769b8f709): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Oct 02 11:09:51 crc kubenswrapper[4783]: E1002 11:09:51.535467 4783 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/octavia-operator@sha256:e1328760310f3bbf4548b8b1268cd711087dd91212b92bb0be287cad1f1b6fe9" Oct 02 11:09:51 crc kubenswrapper[4783]: E1002 11:09:51.535724 4783 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/octavia-operator@sha256:e1328760310f3bbf4548b8b1268cd711087dd91212b92bb0be287cad1f1b6fe9,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-t9zs9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod octavia-operator-controller-manager-7b787867f4-k7m89_openstack-operators(139b1abc-ddcf-4ccc-83f4-deb58a682b0c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Oct 02 11:09:52 crc kubenswrapper[4783]: E1002 11:09:52.804251 4783 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/ovn-operator@sha256:1051afc168038fb814f75e7a5f07c588b295a83ebd143dcd8b46d799e31ad302" Oct 02 11:09:52 crc kubenswrapper[4783]: E1002 11:09:52.804797 4783 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ovn-operator@sha256:1051afc168038fb814f75e7a5f07c588b295a83ebd143dcd8b46d799e31ad302,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-tfwss,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-9976ff44c-xbh4h_openstack-operators(81c143d8-5f4b-4baf-9cb1-6f34110f4833): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Oct 02 11:09:53 crc kubenswrapper[4783]: E1002 11:09:53.440440 4783 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/designate-operator@sha256:f6b935f67979298c3c263ad84d277e5cf26c0dbba3f85f255c1ec4d1d75241d2" Oct 02 11:09:53 crc kubenswrapper[4783]: E1002 11:09:53.440606 4783 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/designate-operator@sha256:f6b935f67979298c3c263ad84d277e5cf26c0dbba3f85f255c1ec4d1d75241d2,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-cmtcz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod designate-operator-controller-manager-84f4f7b77b-wt52p_openstack-operators(52d93c12-b942-4ad3-935a-b555026711ea): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Oct 02 11:09:53 crc kubenswrapper[4783]: E1002 11:09:53.962796 4783 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/horizon-operator@sha256:f5f0d2eb534f763cf6578af513add1c21c1659b2cd75214dfddfedb9eebf6397" Oct 02 11:09:53 crc kubenswrapper[4783]: E1002 11:09:53.963231 4783 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/horizon-operator@sha256:f5f0d2eb534f763cf6578af513add1c21c1659b2cd75214dfddfedb9eebf6397,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-6zv88,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-operator-controller-manager-9f4696d94-9pqks_openstack-operators(ae698aff-8888-48b6-9c37-cbcea6e7bc6e): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Oct 02 11:09:54 crc kubenswrapper[4783]: E1002 11:09:54.570088 4783 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:e7cfed051c1cf801e651fd4035070e38698039f284ac0b2a0332769fdbb4a9c8" Oct 02 11:09:54 crc kubenswrapper[4783]: E1002 11:09:54.570534 4783 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:e7cfed051c1cf801e651fd4035070e38698039f284ac0b2a0332769fdbb4a9c8,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:true,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-baremetal-operator-agent:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_ANSIBLEEE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-ansibleee-runner:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_EVALUATOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-evaluator:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-listener:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_AODH_NOTIFIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-aodh-notifier:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_APACHE_IMAGE_URL_DEFAULT,Value:registry.redhat.io/ubi9/httpd-24:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_KEYSTONE_LISTENER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-keystone-listener:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_BARBICAN_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-barbican-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILO
METER_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-compute:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_IPMI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-ipmi:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_MYSQLD_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/mysqld-exporter:v0.15.1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_NOTIFICATION_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ceilometer-notification:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CEILOMETER_SGCORE_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/sg-core:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_BACKUP_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-backup:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_CINDER_VOLUME_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cinder-volume:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_BACKENDBIND9_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-backend-bind9:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_CENTRAL_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-central:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_MDNS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-mdns:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_PRODUCER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-producer:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_UNBOUND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-unbound:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_DESIGNATE_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-designate-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_FRR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-frr:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_ISCSID_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-iscsid:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_KEPLER_IMAGE_URL_DEFAULT,Value:quay.io/sustainable_computing_io/kepler:release-0.7.12,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_LOGROTATE_CROND_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-cron:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_MULTIPATHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-multipathd:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_DHCP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-dhcp-agent:current-podified,ValueFrom:nil,},EnvVar{Name
:RELATED_IMAGE_EDPM_NEUTRON_METADATA_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-metadata-agent-ovn:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_OVN_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-ovn-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NEUTRON_SRIOV_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-sriov-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_NODE_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/prometheus/node-exporter:v1.5.0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_OVN_BGP_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-bgp-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_EDPM_PODMAN_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/navidys/prometheus-podman-exporter:v1.10.1,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_GLANCE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-glance-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_CFNAPI_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-api-cfn:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HEAT_ENGINE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-heat-engine:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_HORIZON_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_MEMCACHED_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-memcached:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_INFRA_REDIS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-redis:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-conductor:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_INSPECTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-inspector:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_NEUTRON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-neutron-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PXE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ironic-pxe:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_IRONIC_PYTHON_AGENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/ironic-python-agent:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KEYSTONE_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-keystone:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_KSM_IMAGE_URL_DEFAULT,Value:registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.15.0,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MANILA_SHARE_IMAGE_U
RL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-manila-share:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_MARIADB_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NET_UTILS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-netutils:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NEUTRON_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_COMPUTE_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_CONDUCTOR_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-conductor:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_NOVNC_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-novncproxy:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_NOVA_SCHEDULER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-nova-scheduler:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HEALTHMANAGER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-health-manager:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_HOUSEKEEPING_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-housekeeping:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_RSYSLOG_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rsyslog:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OCTAVIA_WORKER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-octavia-worker:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_CLIENT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_MUST_GATHER_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-must-gather:latest,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OPENSTACK_NETWORK_EXPORTER_IMAGE_URL_DEFAULT,Value:quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OS_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/edpm-hardened-uefi:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-controller:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_CONTROLLER_OVS_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-base:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_NB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-nb-db-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_NORTHD_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-northd:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_OVN_SB_DBCLUSTER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-ovn-sb-db-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_PLACEMENT_API
_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-placement-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_RABBITMQ_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_ACCOUNT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-account:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_CONTAINER_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-container:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_OBJECT_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-object:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_SWIFT_PROXY_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-swift-proxy-server:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_TEST_TEMPEST_IMAGE_URL_DEFAULT,Value:quay.io/podified-antelope-centos9/openstack-tempest-all:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_API_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-api:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_APPLIER_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-applier:current-podified,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_WATCHER_DECISION_ENGINE_IMAGE_URL_DEFAULT,Value:quay.io/podified-master-centos9/openstack-watcher-decision-engine:current-podified,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:cert,ReadOnly:true,MountPath:/tmp/k8s-webhook-server/serving-certs,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-4q2wk,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-baremetal-operator-controller-manager-5869cb545-4pwjh_openstack-operators(ebfbf228-ed71-4331-96be-8105d5029d2c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" 
logger="UnhandledError" Oct 02 11:09:55 crc kubenswrapper[4783]: E1002 11:09:55.042913 4783 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/heat-operator@sha256:917e6dcc519277c46e42898bc9f0f066790fa7b9633fcde668cc8a68a547c13c" Oct 02 11:09:55 crc kubenswrapper[4783]: E1002 11:09:55.043078 4783 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/heat-operator@sha256:917e6dcc519277c46e42898bc9f0f066790fa7b9633fcde668cc8a68a547c13c,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-6vfg6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-operator-controller-manager-5d889d78cf-pqhp6_openstack-operators(24d5bc39-4bbb-47af-94e2-222118ccdabb): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Oct 02 11:09:55 crc kubenswrapper[4783]: E1002 11:09:55.930301 4783 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/neutron-operator@sha256:acdeebaa51f962066f42f38b6c2d34a62fc6a24f58f9ee63d61b1e0cafbb29f8" Oct 02 11:09:55 crc kubenswrapper[4783]: E1002 11:09:55.930786 4783 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/neutron-operator@sha256:acdeebaa51f962066f42f38b6c2d34a62fc6a24f58f9ee63d61b1e0cafbb29f8,Command:[/manager],Args:[--health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-dwpvp,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod neutron-operator-controller-manager-849d5b9b84-6xwmm_openstack-operators(a57ed846-184b-49fd-af9f-efc1fbba98e5): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Oct 02 11:09:56 crc kubenswrapper[4783]: E1002 11:09:56.389697 4783 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/keystone-operator@sha256:23fcec0642cbd40af10bca0c5d4e538662d21eda98d6dfec37c38b4d7a47191a" Oct 02 11:09:56 crc kubenswrapper[4783]: E1002 11:09:56.389907 4783 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/keystone-operator@sha256:23fcec0642cbd40af10bca0c5d4e538662d21eda98d6dfec37c38b4d7a47191a,Command:[/manager],Args:[--health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080 --leader-elect],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-7pp2r,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-5bd55b4bff-qtqcj_openstack-operators(6cf94630-4019-4863-80be-6e1088cf3407): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Oct 02 11:09:57 crc kubenswrapper[4783]: E1002 11:09:57.983215 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/ovn-operator-controller-manager-9976ff44c-xbh4h" podUID="81c143d8-5f4b-4baf-9cb1-6f34110f4833" Oct 02 11:09:57 crc kubenswrapper[4783]: E1002 11:09:57.988683 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/glance-operator-controller-manager-84958c4d49-92cr8" podUID="8041f454-2294-478d-b4cd-ffa769b8f709" Oct 02 11:09:58 crc kubenswrapper[4783]: E1002 11:09:58.000198 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/octavia-operator-controller-manager-7b787867f4-k7m89" podUID="139b1abc-ddcf-4ccc-83f4-deb58a682b0c" Oct 02 11:09:58 crc kubenswrapper[4783]: E1002 11:09:58.093006 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/designate-operator-controller-manager-84f4f7b77b-wt52p" podUID="52d93c12-b942-4ad3-935a-b555026711ea" Oct 02 11:09:58 crc kubenswrapper[4783]: E1002 11:09:58.099655 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" 
pod="openstack-operators/horizon-operator-controller-manager-9f4696d94-9pqks" podUID="ae698aff-8888-48b6-9c37-cbcea6e7bc6e" Oct 02 11:09:58 crc kubenswrapper[4783]: E1002 11:09:58.159102 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/barbican-operator-controller-manager-6ff8b75857-x7698" podUID="87d34dfd-25c7-4f4c-bbda-058e38a01994" Oct 02 11:09:58 crc kubenswrapper[4783]: E1002 11:09:58.202782 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/neutron-operator-controller-manager-849d5b9b84-6xwmm" podUID="a57ed846-184b-49fd-af9f-efc1fbba98e5" Oct 02 11:09:58 crc kubenswrapper[4783]: E1002 11:09:58.242795 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5869cb545-4pwjh" podUID="ebfbf228-ed71-4331-96be-8105d5029d2c" Oct 02 11:09:58 crc kubenswrapper[4783]: E1002 11:09:58.271020 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/heat-operator-controller-manager-5d889d78cf-pqhp6" podUID="24d5bc39-4bbb-47af-94e2-222118ccdabb" Oct 02 11:09:58 crc kubenswrapper[4783]: E1002 11:09:58.331125 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/keystone-operator-controller-manager-5bd55b4bff-qtqcj" podUID="6cf94630-4019-4863-80be-6e1088cf3407" Oct 02 11:09:58 crc kubenswrapper[4783]: I1002 11:09:58.792210 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5869cb545-4pwjh" event={"ID":"ebfbf228-ed71-4331-96be-8105d5029d2c","Type":"ContainerStarted","Data":"0143a393c1c82d950a73a2fdfa031e91903189238e59419ccf9af5c3dcb14aeb"} Oct 02 11:09:58 crc kubenswrapper[4783]: E1002 11:09:58.793931 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:e7cfed051c1cf801e651fd4035070e38698039f284ac0b2a0332769fdbb4a9c8\\\"\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5869cb545-4pwjh" podUID="ebfbf228-ed71-4331-96be-8105d5029d2c" Oct 02 11:09:58 crc kubenswrapper[4783]: I1002 11:09:58.803396 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-9d6c5db85-6bk6b" event={"ID":"018c6179-6e65-461a-b457-a5eb949672de","Type":"ContainerStarted","Data":"4e088ee4acd882723d17920c613879312959b9d624b185a7ad9cf0617bce254f"} Oct 02 11:09:58 crc kubenswrapper[4783]: I1002 11:09:58.804176 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-9d6c5db85-6bk6b" Oct 02 11:09:58 crc kubenswrapper[4783]: I1002 11:09:58.808574 4783 kubelet.go:2453] 
"SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-85777745bb-5b98j" event={"ID":"0a24bee6-3dd0-47b3-881a-b5cff49d1e1e","Type":"ContainerStarted","Data":"2f53f505f358d0129e326ca3010a2612af97bb57d079735bd9511a2fdf82b727"} Oct 02 11:09:58 crc kubenswrapper[4783]: I1002 11:09:58.819069 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-849d5b9b84-6xwmm" event={"ID":"a57ed846-184b-49fd-af9f-efc1fbba98e5","Type":"ContainerStarted","Data":"58daad0fd5dc3fcfbf2fa08168f1b4ae4b4a1490501f996b51ea01e60c6da712"} Oct 02 11:09:58 crc kubenswrapper[4783]: E1002 11:09:58.820889 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/neutron-operator@sha256:acdeebaa51f962066f42f38b6c2d34a62fc6a24f58f9ee63d61b1e0cafbb29f8\\\"\"" pod="openstack-operators/neutron-operator-controller-manager-849d5b9b84-6xwmm" podUID="a57ed846-184b-49fd-af9f-efc1fbba98e5" Oct 02 11:09:58 crc kubenswrapper[4783]: I1002 11:09:58.833693 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-64cd67b5cb-kfxk9" event={"ID":"82a47dd2-82ef-4fb9-9216-d14a2332683f","Type":"ContainerStarted","Data":"2731104ceb9ebcdc921be1f51e0b45c5187c068c886865cb60e5ba6a9f62707f"} Oct 02 11:09:58 crc kubenswrapper[4783]: I1002 11:09:58.834308 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-64cd67b5cb-kfxk9" Oct 02 11:09:58 crc kubenswrapper[4783]: I1002 11:09:58.840701 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5cd4858477-hs7gk" event={"ID":"c788b295-31bf-496b-8b4f-fccc3ff3be17","Type":"ContainerStarted","Data":"6c48b11b8c56010731268d8f440776241f64f7b95bcbf2880fbae842ded152f1"} Oct 02 11:09:58 crc kubenswrapper[4783]: I1002 11:09:58.843650 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-b8d54b5d7-b5rx5" event={"ID":"650ecd19-902d-48f8-bea4-2c7f120885dc","Type":"ContainerStarted","Data":"dfcfada0c2086f3247a4697e4395cde5b84280dd008f1b8e0a17e54031036276"} Oct 02 11:09:58 crc kubenswrapper[4783]: I1002 11:09:58.844783 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-88c7-w5766" event={"ID":"56a432ad-7000-45e3-ac88-9ebd3a1eb3a7","Type":"ContainerStarted","Data":"26ed17f8e85a8dea82731bdd3de145f0e9979b915a549ab2b0e626d1081173a4"} Oct 02 11:09:58 crc kubenswrapper[4783]: I1002 11:09:58.844801 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-88c7-w5766" event={"ID":"56a432ad-7000-45e3-ac88-9ebd3a1eb3a7","Type":"ContainerStarted","Data":"6829c6810491604c805647dea54a64f1bc41a0952d362666fbcbe04a3ef51851"} Oct 02 11:09:58 crc kubenswrapper[4783]: I1002 11:09:58.845363 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-88c7-w5766" Oct 02 11:09:58 crc kubenswrapper[4783]: I1002 11:09:58.846511 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-xgkcf" 
event={"ID":"90aa3047-331d-471b-b009-9be03d87b3ed","Type":"ContainerStarted","Data":"7d336e862e91fad7233895180d512b387f55baf60e6ed01b3db94296dbfebdc0"} Oct 02 11:09:58 crc kubenswrapper[4783]: I1002 11:09:58.863370 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-84d6b4b759-9fvns" event={"ID":"96ca8cc4-c237-43b0-ae6b-7cc86a183f46","Type":"ContainerStarted","Data":"5159d2ba00fdf9e1942ba74fb88a2a0df03d44b8f42dd779927b7778b3ab92f5"} Oct 02 11:09:58 crc kubenswrapper[4783]: I1002 11:09:58.876908 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5d889d78cf-pqhp6" event={"ID":"24d5bc39-4bbb-47af-94e2-222118ccdabb","Type":"ContainerStarted","Data":"40a987892ef6d6b8db50c61d95c6ae799b8577b37a344768730e46456cd13cc5"} Oct 02 11:09:58 crc kubenswrapper[4783]: E1002 11:09:58.885647 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/heat-operator@sha256:917e6dcc519277c46e42898bc9f0f066790fa7b9633fcde668cc8a68a547c13c\\\"\"" pod="openstack-operators/heat-operator-controller-manager-5d889d78cf-pqhp6" podUID="24d5bc39-4bbb-47af-94e2-222118ccdabb" Oct 02 11:09:58 crc kubenswrapper[4783]: I1002 11:09:58.894604 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-9976ff44c-xbh4h" event={"ID":"81c143d8-5f4b-4baf-9cb1-6f34110f4833","Type":"ContainerStarted","Data":"50c4a85b9aae5c3d80afa7089274dfc65220cd54959ee01883f1585d0e253d1d"} Oct 02 11:09:58 crc kubenswrapper[4783]: E1002 11:09:58.895630 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:1051afc168038fb814f75e7a5f07c588b295a83ebd143dcd8b46d799e31ad302\\\"\"" pod="openstack-operators/ovn-operator-controller-manager-9976ff44c-xbh4h" podUID="81c143d8-5f4b-4baf-9cb1-6f34110f4833" Oct 02 11:09:58 crc kubenswrapper[4783]: I1002 11:09:58.906470 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-9f4696d94-9pqks" event={"ID":"ae698aff-8888-48b6-9c37-cbcea6e7bc6e","Type":"ContainerStarted","Data":"9ba7db4e6778b2857e4476da4d030ea1df3158652bb4cd775fb2bb6204827f72"} Oct 02 11:09:58 crc kubenswrapper[4783]: E1002 11:09:58.907530 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/horizon-operator@sha256:f5f0d2eb534f763cf6578af513add1c21c1659b2cd75214dfddfedb9eebf6397\\\"\"" pod="openstack-operators/horizon-operator-controller-manager-9f4696d94-9pqks" podUID="ae698aff-8888-48b6-9c37-cbcea6e7bc6e" Oct 02 11:09:58 crc kubenswrapper[4783]: I1002 11:09:58.912246 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-6ff8b75857-x7698" event={"ID":"87d34dfd-25c7-4f4c-bbda-058e38a01994","Type":"ContainerStarted","Data":"d78121450f1fbb60801c617b14cdbaeed3c212dd59654e7a7e58a4b5d8f542a1"} Oct 02 11:09:58 crc kubenswrapper[4783]: E1002 11:09:58.913208 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image 
\\\"quay.io/openstack-k8s-operators/barbican-operator@sha256:bb39758cc8cd0d2cd02841dc81b53fd88647e2db15ee16cdd8c44d4098a942fd\\\"\"" pod="openstack-operators/barbican-operator-controller-manager-6ff8b75857-x7698" podUID="87d34dfd-25c7-4f4c-bbda-058e38a01994" Oct 02 11:09:58 crc kubenswrapper[4783]: I1002 11:09:58.923313 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-7b787867f4-k7m89" event={"ID":"139b1abc-ddcf-4ccc-83f4-deb58a682b0c","Type":"ContainerStarted","Data":"c169eaadc08744c7ace23a38fb75a046df6fe8a6a039b4f2723378f0fbb266a1"} Oct 02 11:09:58 crc kubenswrapper[4783]: E1002 11:09:58.929146 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:e1328760310f3bbf4548b8b1268cd711087dd91212b92bb0be287cad1f1b6fe9\\\"\"" pod="openstack-operators/octavia-operator-controller-manager-7b787867f4-k7m89" podUID="139b1abc-ddcf-4ccc-83f4-deb58a682b0c" Oct 02 11:09:58 crc kubenswrapper[4783]: I1002 11:09:58.932011 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-84f4f7b77b-wt52p" event={"ID":"52d93c12-b942-4ad3-935a-b555026711ea","Type":"ContainerStarted","Data":"863badb4870bb6837b7f39b51c26688ca5d4a1f89d21e470e5f5dbfa330a851f"} Oct 02 11:09:58 crc kubenswrapper[4783]: E1002 11:09:58.934932 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/designate-operator@sha256:f6b935f67979298c3c263ad84d277e5cf26c0dbba3f85f255c1ec4d1d75241d2\\\"\"" pod="openstack-operators/designate-operator-controller-manager-84f4f7b77b-wt52p" podUID="52d93c12-b942-4ad3-935a-b555026711ea" Oct 02 11:09:58 crc kubenswrapper[4783]: I1002 11:09:58.938196 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-644bddb6d8-fkhwr" event={"ID":"ac5ac6e0-2521-4daa-8c0e-091f13b7a406","Type":"ContainerStarted","Data":"bea013937bb2f405a822944a0bdbb97f3f0bd1be9e572d6a61c0180bf0998cf4"} Oct 02 11:09:58 crc kubenswrapper[4783]: I1002 11:09:58.948562 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-84958c4d49-92cr8" event={"ID":"8041f454-2294-478d-b4cd-ffa769b8f709","Type":"ContainerStarted","Data":"9268dacb1f19ef043795acbd79b3809951f9811abf28ad8745a3ea5501104286"} Oct 02 11:09:58 crc kubenswrapper[4783]: E1002 11:09:58.952897 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/glance-operator@sha256:21792a2317c0a55e40b2a02a7d5d4682b76538ed2a2e0633199aa395e60ecc72\\\"\"" pod="openstack-operators/glance-operator-controller-manager-84958c4d49-92cr8" podUID="8041f454-2294-478d-b4cd-ffa769b8f709" Oct 02 11:09:58 crc kubenswrapper[4783]: I1002 11:09:58.954346 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-589c58c6c-qkgtm" event={"ID":"7d7c0a51-805f-422d-bb62-75a53f9a80d0","Type":"ContainerStarted","Data":"7d642d49ef766c2e386feccd1e6a9b2aad512405e45b862012fd3a1a62f29dec"} Oct 02 11:09:58 crc kubenswrapper[4783]: I1002 11:09:58.954989 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="" pod="openstack-operators/placement-operator-controller-manager-589c58c6c-qkgtm" Oct 02 11:09:58 crc kubenswrapper[4783]: I1002 11:09:58.964997 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-6b9957f54f-pwg7t" event={"ID":"d7665104-7dc0-4450-9d2a-85d514354c9e","Type":"ContainerStarted","Data":"cd5dce32afbfacc48fd51c15408a50c4a9e3eaf78d7b2b731c2a0be177ef0bf3"} Oct 02 11:09:58 crc kubenswrapper[4783]: I1002 11:09:58.965645 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-6b9957f54f-pwg7t" Oct 02 11:09:58 crc kubenswrapper[4783]: I1002 11:09:58.975069 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-5bd55b4bff-qtqcj" event={"ID":"6cf94630-4019-4863-80be-6e1088cf3407","Type":"ContainerStarted","Data":"fa429f289d3cf989b3760134383c05620102263d89838d2cdf0dcbd24d29bbdf"} Oct 02 11:09:58 crc kubenswrapper[4783]: E1002 11:09:58.977714 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/keystone-operator@sha256:23fcec0642cbd40af10bca0c5d4e538662d21eda98d6dfec37c38b4d7a47191a\\\"\"" pod="openstack-operators/keystone-operator-controller-manager-5bd55b4bff-qtqcj" podUID="6cf94630-4019-4863-80be-6e1088cf3407" Oct 02 11:09:58 crc kubenswrapper[4783]: I1002 11:09:58.977803 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-6d68dbc695-qg9vc" event={"ID":"1f8c1065-586c-4dce-a4ce-fc262d00063d","Type":"ContainerStarted","Data":"91c61676851582d24684d0e8bda1960bda4ae31fc550078cf7148e004a062d36"} Oct 02 11:09:59 crc kubenswrapper[4783]: I1002 11:09:59.046056 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-64cd67b5cb-kfxk9" podStartSLOduration=4.625574518 podStartE2EDuration="27.04604179s" podCreationTimestamp="2025-10-02 11:09:32 +0000 UTC" firstStartedPulling="2025-10-02 11:09:35.22151046 +0000 UTC m=+1008.537704721" lastFinishedPulling="2025-10-02 11:09:57.641977732 +0000 UTC m=+1030.958171993" observedRunningTime="2025-10-02 11:09:59.023756603 +0000 UTC m=+1032.339950864" watchObservedRunningTime="2025-10-02 11:09:59.04604179 +0000 UTC m=+1032.362236051" Oct 02 11:09:59 crc kubenswrapper[4783]: I1002 11:09:59.047098 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-9d6c5db85-6bk6b" podStartSLOduration=4.732118221 podStartE2EDuration="27.047092969s" podCreationTimestamp="2025-10-02 11:09:32 +0000 UTC" firstStartedPulling="2025-10-02 11:09:35.288174006 +0000 UTC m=+1008.604368267" lastFinishedPulling="2025-10-02 11:09:57.603148754 +0000 UTC m=+1030.919343015" observedRunningTime="2025-10-02 11:09:58.988349559 +0000 UTC m=+1032.304543810" watchObservedRunningTime="2025-10-02 11:09:59.047092969 +0000 UTC m=+1032.363287230" Oct 02 11:09:59 crc kubenswrapper[4783]: I1002 11:09:59.189844 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-6b9957f54f-pwg7t" podStartSLOduration=4.767856804 podStartE2EDuration="27.189826166s" podCreationTimestamp="2025-10-02 11:09:32 +0000 UTC" firstStartedPulling="2025-10-02 11:09:35.280858397 +0000 UTC m=+1008.597052658" 
lastFinishedPulling="2025-10-02 11:09:57.702827769 +0000 UTC m=+1031.019022020" observedRunningTime="2025-10-02 11:09:59.186184957 +0000 UTC m=+1032.502379218" watchObservedRunningTime="2025-10-02 11:09:59.189826166 +0000 UTC m=+1032.506020427" Oct 02 11:09:59 crc kubenswrapper[4783]: I1002 11:09:59.299875 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-5f97d8c699-xgkcf" podStartSLOduration=3.801002946 podStartE2EDuration="26.299860103s" podCreationTimestamp="2025-10-02 11:09:33 +0000 UTC" firstStartedPulling="2025-10-02 11:09:35.039823292 +0000 UTC m=+1008.356017553" lastFinishedPulling="2025-10-02 11:09:57.538680449 +0000 UTC m=+1030.854874710" observedRunningTime="2025-10-02 11:09:59.260579443 +0000 UTC m=+1032.576773704" watchObservedRunningTime="2025-10-02 11:09:59.299860103 +0000 UTC m=+1032.616054364" Oct 02 11:09:59 crc kubenswrapper[4783]: I1002 11:09:59.403826 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-589c58c6c-qkgtm" podStartSLOduration=4.990540038 podStartE2EDuration="27.403812224s" podCreationTimestamp="2025-10-02 11:09:32 +0000 UTC" firstStartedPulling="2025-10-02 11:09:35.125452344 +0000 UTC m=+1008.441646605" lastFinishedPulling="2025-10-02 11:09:57.53872453 +0000 UTC m=+1030.854918791" observedRunningTime="2025-10-02 11:09:59.400968086 +0000 UTC m=+1032.717162347" watchObservedRunningTime="2025-10-02 11:09:59.403812224 +0000 UTC m=+1032.720006485" Oct 02 11:09:59 crc kubenswrapper[4783]: I1002 11:09:59.986659 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-644bddb6d8-fkhwr" event={"ID":"ac5ac6e0-2521-4daa-8c0e-091f13b7a406","Type":"ContainerStarted","Data":"1a77574ae037014277ceef0278393b04c33219df3fb7b949e6dfd30a3f73b561"} Oct 02 11:09:59 crc kubenswrapper[4783]: I1002 11:09:59.987583 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-644bddb6d8-fkhwr" Oct 02 11:09:59 crc kubenswrapper[4783]: I1002 11:09:59.989135 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-85777745bb-5b98j" event={"ID":"0a24bee6-3dd0-47b3-881a-b5cff49d1e1e","Type":"ContainerStarted","Data":"4f4c4e172d5cd501d901a88045a0fecc7873820a86e10fd98a49e09cc423a464"} Oct 02 11:09:59 crc kubenswrapper[4783]: I1002 11:09:59.989530 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-85777745bb-5b98j" Oct 02 11:09:59 crc kubenswrapper[4783]: I1002 11:09:59.990972 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-6d68dbc695-qg9vc" event={"ID":"1f8c1065-586c-4dce-a4ce-fc262d00063d","Type":"ContainerStarted","Data":"e93efb8aa8eda00114a282fcb8296a8a05bdcea4c032e4a7c5fd8c9416d4b645"} Oct 02 11:09:59 crc kubenswrapper[4783]: I1002 11:09:59.991352 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-6d68dbc695-qg9vc" Oct 02 11:09:59 crc kubenswrapper[4783]: I1002 11:09:59.992677 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-589c58c6c-qkgtm" 
event={"ID":"7d7c0a51-805f-422d-bb62-75a53f9a80d0","Type":"ContainerStarted","Data":"e0bd5a04f4e92f202a746b738888f6abfc962c03490ff89d2f2794bff30894b9"} Oct 02 11:09:59 crc kubenswrapper[4783]: I1002 11:09:59.994822 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-5cd4858477-hs7gk" event={"ID":"c788b295-31bf-496b-8b4f-fccc3ff3be17","Type":"ContainerStarted","Data":"4b6483be22e9d58cd207f85d391f02d29fb639e73e27c52be9423da1874bb5b3"} Oct 02 11:09:59 crc kubenswrapper[4783]: I1002 11:09:59.995187 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-5cd4858477-hs7gk" Oct 02 11:09:59 crc kubenswrapper[4783]: I1002 11:09:59.997455 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-b8d54b5d7-b5rx5" event={"ID":"650ecd19-902d-48f8-bea4-2c7f120885dc","Type":"ContainerStarted","Data":"624907f49421077c5a4c6528421d580608f072b786226cb2403db125b9989a68"} Oct 02 11:09:59 crc kubenswrapper[4783]: I1002 11:09:59.997595 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-b8d54b5d7-b5rx5" Oct 02 11:09:59 crc kubenswrapper[4783]: I1002 11:09:59.999244 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-84d6b4b759-9fvns" event={"ID":"96ca8cc4-c237-43b0-ae6b-7cc86a183f46","Type":"ContainerStarted","Data":"26ee0448d8682b143375f2d7d6020f6964d416561fba49a215b1ec58b8c79575"} Oct 02 11:10:00 crc kubenswrapper[4783]: E1002 11:10:00.001513 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/designate-operator@sha256:f6b935f67979298c3c263ad84d277e5cf26c0dbba3f85f255c1ec4d1d75241d2\\\"\"" pod="openstack-operators/designate-operator-controller-manager-84f4f7b77b-wt52p" podUID="52d93c12-b942-4ad3-935a-b555026711ea" Oct 02 11:10:00 crc kubenswrapper[4783]: E1002 11:10:00.002867 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/keystone-operator@sha256:23fcec0642cbd40af10bca0c5d4e538662d21eda98d6dfec37c38b4d7a47191a\\\"\"" pod="openstack-operators/keystone-operator-controller-manager-5bd55b4bff-qtqcj" podUID="6cf94630-4019-4863-80be-6e1088cf3407" Oct 02 11:10:00 crc kubenswrapper[4783]: E1002 11:10:00.002905 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/octavia-operator@sha256:e1328760310f3bbf4548b8b1268cd711087dd91212b92bb0be287cad1f1b6fe9\\\"\"" pod="openstack-operators/octavia-operator-controller-manager-7b787867f4-k7m89" podUID="139b1abc-ddcf-4ccc-83f4-deb58a682b0c" Oct 02 11:10:00 crc kubenswrapper[4783]: E1002 11:10:00.002938 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/glance-operator@sha256:21792a2317c0a55e40b2a02a7d5d4682b76538ed2a2e0633199aa395e60ecc72\\\"\"" pod="openstack-operators/glance-operator-controller-manager-84958c4d49-92cr8" podUID="8041f454-2294-478d-b4cd-ffa769b8f709" Oct 02 11:10:00 crc 
Oct 02 11:10:00 crc kubenswrapper[4783]: E1002 11:10:00.003000 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/neutron-operator@sha256:acdeebaa51f962066f42f38b6c2d34a62fc6a24f58f9ee63d61b1e0cafbb29f8\\\"\"" pod="openstack-operators/neutron-operator-controller-manager-849d5b9b84-6xwmm" podUID="a57ed846-184b-49fd-af9f-efc1fbba98e5"
Oct 02 11:10:00 crc kubenswrapper[4783]: E1002 11:10:00.003026 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:1051afc168038fb814f75e7a5f07c588b295a83ebd143dcd8b46d799e31ad302\\\"\"" pod="openstack-operators/ovn-operator-controller-manager-9976ff44c-xbh4h" podUID="81c143d8-5f4b-4baf-9cb1-6f34110f4833"
Oct 02 11:10:00 crc kubenswrapper[4783]: E1002 11:10:00.003298 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-baremetal-operator@sha256:e7cfed051c1cf801e651fd4035070e38698039f284ac0b2a0332769fdbb4a9c8\\\"\"" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5869cb545-4pwjh" podUID="ebfbf228-ed71-4331-96be-8105d5029d2c"
Oct 02 11:10:00 crc kubenswrapper[4783]: E1002 11:10:00.003358 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/heat-operator@sha256:917e6dcc519277c46e42898bc9f0f066790fa7b9633fcde668cc8a68a547c13c\\\"\"" pod="openstack-operators/heat-operator-controller-manager-5d889d78cf-pqhp6" podUID="24d5bc39-4bbb-47af-94e2-222118ccdabb"
Oct 02 11:10:00 crc kubenswrapper[4783]: I1002 11:10:00.044129 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-644bddb6d8-fkhwr" podStartSLOduration=3.787172136 podStartE2EDuration="28.044112972s" podCreationTimestamp="2025-10-02 11:09:32 +0000 UTC" firstStartedPulling="2025-10-02 11:09:33.292111935 +0000 UTC m=+1006.608306196" lastFinishedPulling="2025-10-02 11:09:57.549052771 +0000 UTC m=+1030.865247032" observedRunningTime="2025-10-02 11:10:00.0436746 +0000 UTC m=+1033.359868861" watchObservedRunningTime="2025-10-02 11:10:00.044112972 +0000 UTC m=+1033.360307233"
Oct 02 11:10:00 crc kubenswrapper[4783]: I1002 11:10:00.047757 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/mariadb-operator-controller-manager-88c7-w5766" podStartSLOduration=5.037763634 podStartE2EDuration="28.047742491s" podCreationTimestamp="2025-10-02 11:09:32 +0000 UTC" firstStartedPulling="2025-10-02 11:09:34.469953922 +0000 UTC m=+1007.786148183" lastFinishedPulling="2025-10-02 11:09:57.479932779 +0000 UTC m=+1030.796127040" observedRunningTime="2025-10-02 11:09:59.650612715 +0000 UTC m=+1032.966806976" watchObservedRunningTime="2025-10-02 11:10:00.047742491 +0000 UTC m=+1033.363936752"
Oct 02 11:10:00 crc kubenswrapper[4783]: I1002 11:10:00.173762 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-b8d54b5d7-b5rx5" podStartSLOduration=5.691398285 podStartE2EDuration="28.173743832s" podCreationTimestamp="2025-10-02 11:09:32 +0000 UTC" firstStartedPulling="2025-10-02 11:09:35.056936318 +0000 UTC m=+1008.373130579" lastFinishedPulling="2025-10-02 11:09:57.539281865 +0000 UTC m=+1030.855476126" observedRunningTime="2025-10-02 11:10:00.166469984 +0000 UTC m=+1033.482664245" watchObservedRunningTime="2025-10-02 11:10:00.173743832 +0000 UTC m=+1033.489938093"
Oct 02 11:10:00 crc kubenswrapper[4783]: I1002 11:10:00.304093 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-84d6b4b759-9fvns" podStartSLOduration=5.829095225 podStartE2EDuration="28.304078132s" podCreationTimestamp="2025-10-02 11:09:32 +0000 UTC" firstStartedPulling="2025-10-02 11:09:35.064393791 +0000 UTC m=+1008.380588052" lastFinishedPulling="2025-10-02 11:09:57.539376698 +0000 UTC m=+1030.855570959" observedRunningTime="2025-10-02 11:10:00.301492862 +0000 UTC m=+1033.617687123" watchObservedRunningTime="2025-10-02 11:10:00.304078132 +0000 UTC m=+1033.620272393"
Oct 02 11:10:00 crc kubenswrapper[4783]: I1002 11:10:00.388459 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-6d68dbc695-qg9vc" podStartSLOduration=5.606828852 podStartE2EDuration="28.388444029s" podCreationTimestamp="2025-10-02 11:09:32 +0000 UTC" firstStartedPulling="2025-10-02 11:09:34.768863373 +0000 UTC m=+1008.085057634" lastFinishedPulling="2025-10-02 11:09:57.55047855 +0000 UTC m=+1030.866672811" observedRunningTime="2025-10-02 11:10:00.349766986 +0000 UTC m=+1033.665961247" watchObservedRunningTime="2025-10-02 11:10:00.388444029 +0000 UTC m=+1033.704638280"
Oct 02 11:10:00 crc kubenswrapper[4783]: I1002 11:10:00.390321 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-5cd4858477-hs7gk" podStartSLOduration=5.591870924 podStartE2EDuration="28.39031539s" podCreationTimestamp="2025-10-02 11:09:32 +0000 UTC" firstStartedPulling="2025-10-02 11:09:34.760102394 +0000 UTC m=+1008.076296655" lastFinishedPulling="2025-10-02 11:09:57.55854686 +0000 UTC m=+1030.874741121" observedRunningTime="2025-10-02 11:10:00.385783907 +0000 UTC m=+1033.701978158" watchObservedRunningTime="2025-10-02 11:10:00.39031539 +0000 UTC m=+1033.706509651"
Oct 02 11:10:00 crc kubenswrapper[4783]: I1002 11:10:00.431082 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-85777745bb-5b98j" podStartSLOduration=5.95925703 podStartE2EDuration="28.43106501s" podCreationTimestamp="2025-10-02 11:09:32 +0000 UTC" firstStartedPulling="2025-10-02 11:09:35.085532587 +0000 UTC m=+1008.401726848" lastFinishedPulling="2025-10-02 11:09:57.557340567 +0000 UTC m=+1030.873534828" observedRunningTime="2025-10-02 11:10:00.429274051 +0000 UTC m=+1033.745468312" watchObservedRunningTime="2025-10-02 11:10:00.43106501 +0000 UTC m=+1033.747259261"
Oct 02 11:10:01 crc kubenswrapper[4783]: I1002 11:10:01.005369 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-84d6b4b759-9fvns"
Oct 02 11:10:03 crc kubenswrapper[4783]: I1002 11:10:03.043214 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-88c7-w5766"
Oct 02 11:10:03 crc kubenswrapper[4783]: I1002 11:10:03.088653 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-6d68dbc695-qg9vc"
Oct 02 11:10:03 crc kubenswrapper[4783]: I1002 11:10:03.108676 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-64cd67b5cb-kfxk9"
Oct 02 11:10:03 crc kubenswrapper[4783]: I1002 11:10:03.259467 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-589c58c6c-qkgtm"
Oct 02 11:10:03 crc kubenswrapper[4783]: I1002 11:10:03.357966 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-84d6b4b759-9fvns"
Oct 02 11:10:03 crc kubenswrapper[4783]: I1002 11:10:03.385743 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-b8d54b5d7-b5rx5"
Oct 02 11:10:03 crc kubenswrapper[4783]: I1002 11:10:03.501245 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-85777745bb-5b98j"
Oct 02 11:10:03 crc kubenswrapper[4783]: I1002 11:10:03.569714 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-6b9957f54f-pwg7t"
Oct 02 11:10:04 crc kubenswrapper[4783]: I1002 11:10:04.036957 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-6ff8b75857-x7698" event={"ID":"87d34dfd-25c7-4f4c-bbda-058e38a01994","Type":"ContainerStarted","Data":"c7572c8959d8e2e8208839a1bd6821d53209c7108ed7672b76af6a749d64244c"}
Oct 02 11:10:04 crc kubenswrapper[4783]: I1002 11:10:04.037225 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-6ff8b75857-x7698"
Oct 02 11:10:04 crc kubenswrapper[4783]: I1002 11:10:04.053009 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-6ff8b75857-x7698" podStartSLOduration=2.9806253099999998 podStartE2EDuration="32.052991681s" podCreationTimestamp="2025-10-02 11:09:32 +0000 UTC" firstStartedPulling="2025-10-02 11:09:33.936669258 +0000 UTC m=+1007.252863519" lastFinishedPulling="2025-10-02 11:10:03.009035629 +0000 UTC m=+1036.325229890" observedRunningTime="2025-10-02 11:10:04.050537444 +0000 UTC m=+1037.366731715" watchObservedRunningTime="2025-10-02 11:10:04.052991681 +0000 UTC m=+1037.369185952"
Oct 02 11:10:04 crc kubenswrapper[4783]: I1002 11:10:04.433989 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-9d6c5db85-6bk6b"
Oct 02 11:10:12 crc kubenswrapper[4783]: I1002 11:10:12.094246 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-9976ff44c-xbh4h" event={"ID":"81c143d8-5f4b-4baf-9cb1-6f34110f4833","Type":"ContainerStarted","Data":"6fc000527bcc679e3401e52cd1ece11a921dabfab583c3383bafa88d8f4edca9"}
event={"ID":"81c143d8-5f4b-4baf-9cb1-6f34110f4833","Type":"ContainerStarted","Data":"6fc000527bcc679e3401e52cd1ece11a921dabfab583c3383bafa88d8f4edca9"} Oct 02 11:10:12 crc kubenswrapper[4783]: I1002 11:10:12.095062 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-9976ff44c-xbh4h" Oct 02 11:10:12 crc kubenswrapper[4783]: I1002 11:10:12.097653 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5869cb545-4pwjh" event={"ID":"ebfbf228-ed71-4331-96be-8105d5029d2c","Type":"ContainerStarted","Data":"28c67692d761d2a21f7ead0ab11d63c77747c068b48fd2ed5f0bea67b50a460a"} Oct 02 11:10:12 crc kubenswrapper[4783]: I1002 11:10:12.097839 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5869cb545-4pwjh" Oct 02 11:10:12 crc kubenswrapper[4783]: I1002 11:10:12.113114 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-9976ff44c-xbh4h" podStartSLOduration=3.468935849 podStartE2EDuration="40.113093251s" podCreationTimestamp="2025-10-02 11:09:32 +0000 UTC" firstStartedPulling="2025-10-02 11:09:35.186495937 +0000 UTC m=+1008.502690198" lastFinishedPulling="2025-10-02 11:10:11.830653339 +0000 UTC m=+1045.146847600" observedRunningTime="2025-10-02 11:10:12.109626686 +0000 UTC m=+1045.425820947" watchObservedRunningTime="2025-10-02 11:10:12.113093251 +0000 UTC m=+1045.429287512" Oct 02 11:10:12 crc kubenswrapper[4783]: I1002 11:10:12.139202 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5869cb545-4pwjh" podStartSLOduration=3.939867655 podStartE2EDuration="40.139184191s" podCreationTimestamp="2025-10-02 11:09:32 +0000 UTC" firstStartedPulling="2025-10-02 11:09:35.627552909 +0000 UTC m=+1008.943747170" lastFinishedPulling="2025-10-02 11:10:11.826869445 +0000 UTC m=+1045.143063706" observedRunningTime="2025-10-02 11:10:12.136007345 +0000 UTC m=+1045.452201606" watchObservedRunningTime="2025-10-02 11:10:12.139184191 +0000 UTC m=+1045.455378472" Oct 02 11:10:12 crc kubenswrapper[4783]: I1002 11:10:12.532585 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-644bddb6d8-fkhwr" Oct 02 11:10:12 crc kubenswrapper[4783]: I1002 11:10:12.556661 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-6ff8b75857-x7698" Oct 02 11:10:12 crc kubenswrapper[4783]: I1002 11:10:12.953994 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-5cd4858477-hs7gk" Oct 02 11:10:13 crc kubenswrapper[4783]: I1002 11:10:13.104739 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-84f4f7b77b-wt52p" event={"ID":"52d93c12-b942-4ad3-935a-b555026711ea","Type":"ContainerStarted","Data":"49ee7e113b6b3a1ba379b1b5b1712b16ff04b92e769d0974969288eabaf5349e"} Oct 02 11:10:13 crc kubenswrapper[4783]: I1002 11:10:13.105018 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/designate-operator-controller-manager-84f4f7b77b-wt52p" Oct 02 11:10:13 crc kubenswrapper[4783]: I1002 11:10:13.123538 4783 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-84f4f7b77b-wt52p" podStartSLOduration=2.723733532 podStartE2EDuration="41.123515068s" podCreationTimestamp="2025-10-02 11:09:32 +0000 UTC" firstStartedPulling="2025-10-02 11:09:34.055234717 +0000 UTC m=+1007.371428978" lastFinishedPulling="2025-10-02 11:10:12.455016253 +0000 UTC m=+1045.771210514" observedRunningTime="2025-10-02 11:10:13.118375048 +0000 UTC m=+1046.434569309" watchObservedRunningTime="2025-10-02 11:10:13.123515068 +0000 UTC m=+1046.439709329" Oct 02 11:10:14 crc kubenswrapper[4783]: I1002 11:10:14.114763 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-7b787867f4-k7m89" event={"ID":"139b1abc-ddcf-4ccc-83f4-deb58a682b0c","Type":"ContainerStarted","Data":"3236660aee9192bbdb2fbc37ef4bbd6cbec4bcc3c7608f570df3f3903a4117d5"} Oct 02 11:10:14 crc kubenswrapper[4783]: I1002 11:10:14.115302 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-7b787867f4-k7m89" Oct 02 11:10:14 crc kubenswrapper[4783]: I1002 11:10:14.118359 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-5d889d78cf-pqhp6" event={"ID":"24d5bc39-4bbb-47af-94e2-222118ccdabb","Type":"ContainerStarted","Data":"69669cb5e5f3ad0b260f520044ba58343c10e589726f0fd4a983fbc4c4e06a52"} Oct 02 11:10:14 crc kubenswrapper[4783]: I1002 11:10:14.137495 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-7b787867f4-k7m89" podStartSLOduration=3.693657499 podStartE2EDuration="42.137465222s" podCreationTimestamp="2025-10-02 11:09:32 +0000 UTC" firstStartedPulling="2025-10-02 11:09:35.338337562 +0000 UTC m=+1008.654531823" lastFinishedPulling="2025-10-02 11:10:13.782145245 +0000 UTC m=+1047.098339546" observedRunningTime="2025-10-02 11:10:14.133082953 +0000 UTC m=+1047.449277214" watchObservedRunningTime="2025-10-02 11:10:14.137465222 +0000 UTC m=+1047.453659483" Oct 02 11:10:14 crc kubenswrapper[4783]: I1002 11:10:14.154386 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-5d889d78cf-pqhp6" podStartSLOduration=2.371015197 podStartE2EDuration="42.154369302s" podCreationTimestamp="2025-10-02 11:09:32 +0000 UTC" firstStartedPulling="2025-10-02 11:09:33.99658725 +0000 UTC m=+1007.312781511" lastFinishedPulling="2025-10-02 11:10:13.779941325 +0000 UTC m=+1047.096135616" observedRunningTime="2025-10-02 11:10:14.150776915 +0000 UTC m=+1047.466971186" watchObservedRunningTime="2025-10-02 11:10:14.154369302 +0000 UTC m=+1047.470563583" Oct 02 11:10:15 crc kubenswrapper[4783]: I1002 11:10:15.128694 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-849d5b9b84-6xwmm" event={"ID":"a57ed846-184b-49fd-af9f-efc1fbba98e5","Type":"ContainerStarted","Data":"96dfc0ad44b08710cb80fa2c1aff28b872c48170db60f0dd5d8dc48a90f18369"} Oct 02 11:10:15 crc kubenswrapper[4783]: I1002 11:10:15.129110 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-849d5b9b84-6xwmm" Oct 02 11:10:15 crc kubenswrapper[4783]: I1002 11:10:15.151306 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack-operators/neutron-operator-controller-manager-849d5b9b84-6xwmm" podStartSLOduration=3.3091879779999998 podStartE2EDuration="43.151277443s" podCreationTimestamp="2025-10-02 11:09:32 +0000 UTC" firstStartedPulling="2025-10-02 11:09:34.759781825 +0000 UTC m=+1008.075976086" lastFinishedPulling="2025-10-02 11:10:14.60187128 +0000 UTC m=+1047.918065551" observedRunningTime="2025-10-02 11:10:15.148921009 +0000 UTC m=+1048.465115280" watchObservedRunningTime="2025-10-02 11:10:15.151277443 +0000 UTC m=+1048.467471744" Oct 02 11:10:16 crc kubenswrapper[4783]: I1002 11:10:16.137785 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-5bd55b4bff-qtqcj" event={"ID":"6cf94630-4019-4863-80be-6e1088cf3407","Type":"ContainerStarted","Data":"3ba7a421b83a2e4a397ce53aad9368c491c6880c5cba772c9884546ccef8942a"} Oct 02 11:10:16 crc kubenswrapper[4783]: I1002 11:10:16.138453 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-5bd55b4bff-qtqcj" Oct 02 11:10:16 crc kubenswrapper[4783]: I1002 11:10:16.139534 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-9f4696d94-9pqks" event={"ID":"ae698aff-8888-48b6-9c37-cbcea6e7bc6e","Type":"ContainerStarted","Data":"c4b8119afb42d18f7623140180d9749b9d2adcdabbfbc3bc0f2a1bcf73e1e8b1"} Oct 02 11:10:16 crc kubenswrapper[4783]: I1002 11:10:16.139852 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-9f4696d94-9pqks" Oct 02 11:10:16 crc kubenswrapper[4783]: I1002 11:10:16.158985 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-5bd55b4bff-qtqcj" podStartSLOduration=3.380398277 podStartE2EDuration="44.158967556s" podCreationTimestamp="2025-10-02 11:09:32 +0000 UTC" firstStartedPulling="2025-10-02 11:09:34.470325422 +0000 UTC m=+1007.786519683" lastFinishedPulling="2025-10-02 11:10:15.248894701 +0000 UTC m=+1048.565088962" observedRunningTime="2025-10-02 11:10:16.155308247 +0000 UTC m=+1049.471502508" watchObservedRunningTime="2025-10-02 11:10:16.158967556 +0000 UTC m=+1049.475161817" Oct 02 11:10:16 crc kubenswrapper[4783]: I1002 11:10:16.175052 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-9f4696d94-9pqks" podStartSLOduration=3.152369247 podStartE2EDuration="44.175031634s" podCreationTimestamp="2025-10-02 11:09:32 +0000 UTC" firstStartedPulling="2025-10-02 11:09:33.994100062 +0000 UTC m=+1007.310294323" lastFinishedPulling="2025-10-02 11:10:15.016762459 +0000 UTC m=+1048.332956710" observedRunningTime="2025-10-02 11:10:16.171456417 +0000 UTC m=+1049.487650678" watchObservedRunningTime="2025-10-02 11:10:16.175031634 +0000 UTC m=+1049.491225905" Oct 02 11:10:17 crc kubenswrapper[4783]: I1002 11:10:17.148953 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-84958c4d49-92cr8" event={"ID":"8041f454-2294-478d-b4cd-ffa769b8f709","Type":"ContainerStarted","Data":"42efadd321da50330faad667567a5cb3cb509df4d5b6b6267588b83e10c107d4"} Oct 02 11:10:17 crc kubenswrapper[4783]: I1002 11:10:17.171104 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-84958c4d49-92cr8" 
Oct 02 11:10:22 crc kubenswrapper[4783]: I1002 11:10:22.577584 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-84f4f7b77b-wt52p"
Oct 02 11:10:22 crc kubenswrapper[4783]: I1002 11:10:22.870118 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-9f4696d94-9pqks"
Oct 02 11:10:22 crc kubenswrapper[4783]: I1002 11:10:22.916724 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-84958c4d49-92cr8"
Oct 02 11:10:22 crc kubenswrapper[4783]: I1002 11:10:22.918322 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-84958c4d49-92cr8"
Oct 02 11:10:22 crc kubenswrapper[4783]: I1002 11:10:22.948706 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-5d889d78cf-pqhp6"
Oct 02 11:10:22 crc kubenswrapper[4783]: I1002 11:10:22.955700 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/heat-operator-controller-manager-5d889d78cf-pqhp6"
Oct 02 11:10:22 crc kubenswrapper[4783]: I1002 11:10:22.971891 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-5bd55b4bff-qtqcj"
Oct 02 11:10:23 crc kubenswrapper[4783]: I1002 11:10:23.150300 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-849d5b9b84-6xwmm"
Oct 02 11:10:23 crc kubenswrapper[4783]: I1002 11:10:23.221365 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-7b787867f4-k7m89"
Oct 02 11:10:23 crc kubenswrapper[4783]: I1002 11:10:23.269077 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-9976ff44c-xbh4h"
Oct 02 11:10:24 crc kubenswrapper[4783]: I1002 11:10:24.834587 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-5869cb545-4pwjh"
Oct 02 11:10:42 crc kubenswrapper[4783]: I1002 11:10:42.884634 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-4bkb5"]
Oct 02 11:10:42 crc kubenswrapper[4783]: I1002 11:10:42.887436 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-4bkb5"
Oct 02 11:10:42 crc kubenswrapper[4783]: I1002 11:10:42.890889 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns"
Oct 02 11:10:42 crc kubenswrapper[4783]: I1002 11:10:42.890982 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-69t8j"
Oct 02 11:10:42 crc kubenswrapper[4783]: I1002 11:10:42.893404 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt"
Oct 02 11:10:42 crc kubenswrapper[4783]: I1002 11:10:42.896304 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt"
Oct 02 11:10:42 crc kubenswrapper[4783]: I1002 11:10:42.900897 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-4bkb5"]
Oct 02 11:10:42 crc kubenswrapper[4783]: I1002 11:10:42.949180 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-wg2cc"]
Oct 02 11:10:42 crc kubenswrapper[4783]: I1002 11:10:42.950731 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-wg2cc"
Oct 02 11:10:42 crc kubenswrapper[4783]: I1002 11:10:42.956694 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc"
Oct 02 11:10:42 crc kubenswrapper[4783]: I1002 11:10:42.971649 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-wg2cc"]
Oct 02 11:10:43 crc kubenswrapper[4783]: I1002 11:10:43.010822 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hqg79\" (UniqueName: \"kubernetes.io/projected/b5dacaee-2905-4af8-b754-54fb4d6091f5-kube-api-access-hqg79\") pod \"dnsmasq-dns-78dd6ddcc-wg2cc\" (UID: \"b5dacaee-2905-4af8-b754-54fb4d6091f5\") " pod="openstack/dnsmasq-dns-78dd6ddcc-wg2cc"
Oct 02 11:10:43 crc kubenswrapper[4783]: I1002 11:10:43.010909 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b5dacaee-2905-4af8-b754-54fb4d6091f5-config\") pod \"dnsmasq-dns-78dd6ddcc-wg2cc\" (UID: \"b5dacaee-2905-4af8-b754-54fb4d6091f5\") " pod="openstack/dnsmasq-dns-78dd6ddcc-wg2cc"
Oct 02 11:10:43 crc kubenswrapper[4783]: I1002 11:10:43.010967 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a8adde51-a6cc-4ca0-ba60-76030ddb125d-config\") pod \"dnsmasq-dns-675f4bcbfc-4bkb5\" (UID: \"a8adde51-a6cc-4ca0-ba60-76030ddb125d\") " pod="openstack/dnsmasq-dns-675f4bcbfc-4bkb5"
Oct 02 11:10:43 crc kubenswrapper[4783]: I1002 11:10:43.010991 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b5dacaee-2905-4af8-b754-54fb4d6091f5-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-wg2cc\" (UID: \"b5dacaee-2905-4af8-b754-54fb4d6091f5\") " pod="openstack/dnsmasq-dns-78dd6ddcc-wg2cc"
Oct 02 11:10:43 crc kubenswrapper[4783]: I1002 11:10:43.011049 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g9v5g\" (UniqueName: \"kubernetes.io/projected/a8adde51-a6cc-4ca0-ba60-76030ddb125d-kube-api-access-g9v5g\") pod \"dnsmasq-dns-675f4bcbfc-4bkb5\" (UID: \"a8adde51-a6cc-4ca0-ba60-76030ddb125d\") " pod="openstack/dnsmasq-dns-675f4bcbfc-4bkb5"
Oct 02 11:10:43 crc kubenswrapper[4783]: I1002 11:10:43.112993 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hqg79\" (UniqueName: \"kubernetes.io/projected/b5dacaee-2905-4af8-b754-54fb4d6091f5-kube-api-access-hqg79\") pod \"dnsmasq-dns-78dd6ddcc-wg2cc\" (UID: \"b5dacaee-2905-4af8-b754-54fb4d6091f5\") " pod="openstack/dnsmasq-dns-78dd6ddcc-wg2cc"
Oct 02 11:10:43 crc kubenswrapper[4783]: I1002 11:10:43.113060 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b5dacaee-2905-4af8-b754-54fb4d6091f5-config\") pod \"dnsmasq-dns-78dd6ddcc-wg2cc\" (UID: \"b5dacaee-2905-4af8-b754-54fb4d6091f5\") " pod="openstack/dnsmasq-dns-78dd6ddcc-wg2cc"
Oct 02 11:10:43 crc kubenswrapper[4783]: I1002 11:10:43.113090 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a8adde51-a6cc-4ca0-ba60-76030ddb125d-config\") pod \"dnsmasq-dns-675f4bcbfc-4bkb5\" (UID: \"a8adde51-a6cc-4ca0-ba60-76030ddb125d\") " pod="openstack/dnsmasq-dns-675f4bcbfc-4bkb5"
Oct 02 11:10:43 crc kubenswrapper[4783]: I1002 11:10:43.113113 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b5dacaee-2905-4af8-b754-54fb4d6091f5-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-wg2cc\" (UID: \"b5dacaee-2905-4af8-b754-54fb4d6091f5\") " pod="openstack/dnsmasq-dns-78dd6ddcc-wg2cc"
Oct 02 11:10:43 crc kubenswrapper[4783]: I1002 11:10:43.113145 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g9v5g\" (UniqueName: \"kubernetes.io/projected/a8adde51-a6cc-4ca0-ba60-76030ddb125d-kube-api-access-g9v5g\") pod \"dnsmasq-dns-675f4bcbfc-4bkb5\" (UID: \"a8adde51-a6cc-4ca0-ba60-76030ddb125d\") " pod="openstack/dnsmasq-dns-675f4bcbfc-4bkb5"
Oct 02 11:10:43 crc kubenswrapper[4783]: I1002 11:10:43.114162 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b5dacaee-2905-4af8-b754-54fb4d6091f5-config\") pod \"dnsmasq-dns-78dd6ddcc-wg2cc\" (UID: \"b5dacaee-2905-4af8-b754-54fb4d6091f5\") " pod="openstack/dnsmasq-dns-78dd6ddcc-wg2cc"
Oct 02 11:10:43 crc kubenswrapper[4783]: I1002 11:10:43.114164 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a8adde51-a6cc-4ca0-ba60-76030ddb125d-config\") pod \"dnsmasq-dns-675f4bcbfc-4bkb5\" (UID: \"a8adde51-a6cc-4ca0-ba60-76030ddb125d\") " pod="openstack/dnsmasq-dns-675f4bcbfc-4bkb5"
Oct 02 11:10:43 crc kubenswrapper[4783]: I1002 11:10:43.114335 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b5dacaee-2905-4af8-b754-54fb4d6091f5-dns-svc\") pod \"dnsmasq-dns-78dd6ddcc-wg2cc\" (UID: \"b5dacaee-2905-4af8-b754-54fb4d6091f5\") " pod="openstack/dnsmasq-dns-78dd6ddcc-wg2cc"
Oct 02 11:10:43 crc kubenswrapper[4783]: I1002 11:10:43.137581 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g9v5g\" (UniqueName: \"kubernetes.io/projected/a8adde51-a6cc-4ca0-ba60-76030ddb125d-kube-api-access-g9v5g\") pod \"dnsmasq-dns-675f4bcbfc-4bkb5\" (UID: \"a8adde51-a6cc-4ca0-ba60-76030ddb125d\") " pod="openstack/dnsmasq-dns-675f4bcbfc-4bkb5"
Oct 02 11:10:43 crc kubenswrapper[4783]: I1002 11:10:43.138353 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hqg79\" (UniqueName: \"kubernetes.io/projected/b5dacaee-2905-4af8-b754-54fb4d6091f5-kube-api-access-hqg79\") pod \"dnsmasq-dns-78dd6ddcc-wg2cc\" (UID: \"b5dacaee-2905-4af8-b754-54fb4d6091f5\") " pod="openstack/dnsmasq-dns-78dd6ddcc-wg2cc"
"MountVolume.SetUp succeeded for volume \"kube-api-access-hqg79\" (UniqueName: \"kubernetes.io/projected/b5dacaee-2905-4af8-b754-54fb4d6091f5-kube-api-access-hqg79\") pod \"dnsmasq-dns-78dd6ddcc-wg2cc\" (UID: \"b5dacaee-2905-4af8-b754-54fb4d6091f5\") " pod="openstack/dnsmasq-dns-78dd6ddcc-wg2cc" Oct 02 11:10:43 crc kubenswrapper[4783]: I1002 11:10:43.209251 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-4bkb5" Oct 02 11:10:43 crc kubenswrapper[4783]: I1002 11:10:43.270445 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-wg2cc" Oct 02 11:10:43 crc kubenswrapper[4783]: I1002 11:10:43.700533 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-4bkb5"] Oct 02 11:10:43 crc kubenswrapper[4783]: I1002 11:10:43.710229 4783 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 02 11:10:43 crc kubenswrapper[4783]: I1002 11:10:43.808727 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-wg2cc"] Oct 02 11:10:43 crc kubenswrapper[4783]: W1002 11:10:43.812729 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb5dacaee_2905_4af8_b754_54fb4d6091f5.slice/crio-1657a553c4498f736397a3a9f56e94d0079ea318693eb658c92514b12d86bc19 WatchSource:0}: Error finding container 1657a553c4498f736397a3a9f56e94d0079ea318693eb658c92514b12d86bc19: Status 404 returned error can't find the container with id 1657a553c4498f736397a3a9f56e94d0079ea318693eb658c92514b12d86bc19 Oct 02 11:10:44 crc kubenswrapper[4783]: I1002 11:10:44.369361 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-4bkb5" event={"ID":"a8adde51-a6cc-4ca0-ba60-76030ddb125d","Type":"ContainerStarted","Data":"07d60e089697e8c038e60f201d3b29f733103a218cdff2d141769bdea69c265f"} Oct 02 11:10:44 crc kubenswrapper[4783]: I1002 11:10:44.371269 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-wg2cc" event={"ID":"b5dacaee-2905-4af8-b754-54fb4d6091f5","Type":"ContainerStarted","Data":"1657a553c4498f736397a3a9f56e94d0079ea318693eb658c92514b12d86bc19"} Oct 02 11:10:46 crc kubenswrapper[4783]: I1002 11:10:46.117838 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-4bkb5"] Oct 02 11:10:46 crc kubenswrapper[4783]: I1002 11:10:46.145122 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-6gwjh"] Oct 02 11:10:46 crc kubenswrapper[4783]: I1002 11:10:46.146191 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5ccc8479f9-6gwjh" Oct 02 11:10:46 crc kubenswrapper[4783]: I1002 11:10:46.169674 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/897fe4b3-a755-4281-8f7d-c64d3551f950-config\") pod \"dnsmasq-dns-5ccc8479f9-6gwjh\" (UID: \"897fe4b3-a755-4281-8f7d-c64d3551f950\") " pod="openstack/dnsmasq-dns-5ccc8479f9-6gwjh" Oct 02 11:10:46 crc kubenswrapper[4783]: I1002 11:10:46.169752 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/897fe4b3-a755-4281-8f7d-c64d3551f950-dns-svc\") pod \"dnsmasq-dns-5ccc8479f9-6gwjh\" (UID: \"897fe4b3-a755-4281-8f7d-c64d3551f950\") " pod="openstack/dnsmasq-dns-5ccc8479f9-6gwjh" Oct 02 11:10:46 crc kubenswrapper[4783]: I1002 11:10:46.169814 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6kn4l\" (UniqueName: \"kubernetes.io/projected/897fe4b3-a755-4281-8f7d-c64d3551f950-kube-api-access-6kn4l\") pod \"dnsmasq-dns-5ccc8479f9-6gwjh\" (UID: \"897fe4b3-a755-4281-8f7d-c64d3551f950\") " pod="openstack/dnsmasq-dns-5ccc8479f9-6gwjh" Oct 02 11:10:46 crc kubenswrapper[4783]: I1002 11:10:46.256507 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-6gwjh"] Oct 02 11:10:46 crc kubenswrapper[4783]: I1002 11:10:46.271782 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/897fe4b3-a755-4281-8f7d-c64d3551f950-config\") pod \"dnsmasq-dns-5ccc8479f9-6gwjh\" (UID: \"897fe4b3-a755-4281-8f7d-c64d3551f950\") " pod="openstack/dnsmasq-dns-5ccc8479f9-6gwjh" Oct 02 11:10:46 crc kubenswrapper[4783]: I1002 11:10:46.271835 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/897fe4b3-a755-4281-8f7d-c64d3551f950-dns-svc\") pod \"dnsmasq-dns-5ccc8479f9-6gwjh\" (UID: \"897fe4b3-a755-4281-8f7d-c64d3551f950\") " pod="openstack/dnsmasq-dns-5ccc8479f9-6gwjh" Oct 02 11:10:46 crc kubenswrapper[4783]: I1002 11:10:46.271887 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6kn4l\" (UniqueName: \"kubernetes.io/projected/897fe4b3-a755-4281-8f7d-c64d3551f950-kube-api-access-6kn4l\") pod \"dnsmasq-dns-5ccc8479f9-6gwjh\" (UID: \"897fe4b3-a755-4281-8f7d-c64d3551f950\") " pod="openstack/dnsmasq-dns-5ccc8479f9-6gwjh" Oct 02 11:10:46 crc kubenswrapper[4783]: I1002 11:10:46.280459 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/897fe4b3-a755-4281-8f7d-c64d3551f950-dns-svc\") pod \"dnsmasq-dns-5ccc8479f9-6gwjh\" (UID: \"897fe4b3-a755-4281-8f7d-c64d3551f950\") " pod="openstack/dnsmasq-dns-5ccc8479f9-6gwjh" Oct 02 11:10:46 crc kubenswrapper[4783]: I1002 11:10:46.282355 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/897fe4b3-a755-4281-8f7d-c64d3551f950-config\") pod \"dnsmasq-dns-5ccc8479f9-6gwjh\" (UID: \"897fe4b3-a755-4281-8f7d-c64d3551f950\") " pod="openstack/dnsmasq-dns-5ccc8479f9-6gwjh" Oct 02 11:10:46 crc kubenswrapper[4783]: I1002 11:10:46.326032 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6kn4l\" (UniqueName: 
\"kubernetes.io/projected/897fe4b3-a755-4281-8f7d-c64d3551f950-kube-api-access-6kn4l\") pod \"dnsmasq-dns-5ccc8479f9-6gwjh\" (UID: \"897fe4b3-a755-4281-8f7d-c64d3551f950\") " pod="openstack/dnsmasq-dns-5ccc8479f9-6gwjh" Oct 02 11:10:46 crc kubenswrapper[4783]: I1002 11:10:46.483818 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ccc8479f9-6gwjh" Oct 02 11:10:46 crc kubenswrapper[4783]: I1002 11:10:46.699326 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-wg2cc"] Oct 02 11:10:46 crc kubenswrapper[4783]: I1002 11:10:46.774610 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-tmt9k"] Oct 02 11:10:46 crc kubenswrapper[4783]: I1002 11:10:46.775752 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-tmt9k" Oct 02 11:10:46 crc kubenswrapper[4783]: I1002 11:10:46.824866 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-tmt9k"] Oct 02 11:10:46 crc kubenswrapper[4783]: I1002 11:10:46.889816 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8ed908e0-5c5b-4246-9192-17d4b995650f-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-tmt9k\" (UID: \"8ed908e0-5c5b-4246-9192-17d4b995650f\") " pod="openstack/dnsmasq-dns-57d769cc4f-tmt9k" Oct 02 11:10:46 crc kubenswrapper[4783]: I1002 11:10:46.889866 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f9vtn\" (UniqueName: \"kubernetes.io/projected/8ed908e0-5c5b-4246-9192-17d4b995650f-kube-api-access-f9vtn\") pod \"dnsmasq-dns-57d769cc4f-tmt9k\" (UID: \"8ed908e0-5c5b-4246-9192-17d4b995650f\") " pod="openstack/dnsmasq-dns-57d769cc4f-tmt9k" Oct 02 11:10:46 crc kubenswrapper[4783]: I1002 11:10:46.889907 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8ed908e0-5c5b-4246-9192-17d4b995650f-config\") pod \"dnsmasq-dns-57d769cc4f-tmt9k\" (UID: \"8ed908e0-5c5b-4246-9192-17d4b995650f\") " pod="openstack/dnsmasq-dns-57d769cc4f-tmt9k" Oct 02 11:10:46 crc kubenswrapper[4783]: I1002 11:10:46.991618 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f9vtn\" (UniqueName: \"kubernetes.io/projected/8ed908e0-5c5b-4246-9192-17d4b995650f-kube-api-access-f9vtn\") pod \"dnsmasq-dns-57d769cc4f-tmt9k\" (UID: \"8ed908e0-5c5b-4246-9192-17d4b995650f\") " pod="openstack/dnsmasq-dns-57d769cc4f-tmt9k" Oct 02 11:10:46 crc kubenswrapper[4783]: I1002 11:10:46.991954 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8ed908e0-5c5b-4246-9192-17d4b995650f-config\") pod \"dnsmasq-dns-57d769cc4f-tmt9k\" (UID: \"8ed908e0-5c5b-4246-9192-17d4b995650f\") " pod="openstack/dnsmasq-dns-57d769cc4f-tmt9k" Oct 02 11:10:46 crc kubenswrapper[4783]: I1002 11:10:46.992795 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8ed908e0-5c5b-4246-9192-17d4b995650f-config\") pod \"dnsmasq-dns-57d769cc4f-tmt9k\" (UID: \"8ed908e0-5c5b-4246-9192-17d4b995650f\") " pod="openstack/dnsmasq-dns-57d769cc4f-tmt9k" Oct 02 11:10:46 crc kubenswrapper[4783]: I1002 11:10:46.992935 4783 reconciler_common.go:218] "operationExecutor.MountVolume 
Oct 02 11:10:46 crc kubenswrapper[4783]: I1002 11:10:46.993487 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8ed908e0-5c5b-4246-9192-17d4b995650f-dns-svc\") pod \"dnsmasq-dns-57d769cc4f-tmt9k\" (UID: \"8ed908e0-5c5b-4246-9192-17d4b995650f\") " pod="openstack/dnsmasq-dns-57d769cc4f-tmt9k"
Oct 02 11:10:47 crc kubenswrapper[4783]: I1002 11:10:47.018004 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f9vtn\" (UniqueName: \"kubernetes.io/projected/8ed908e0-5c5b-4246-9192-17d4b995650f-kube-api-access-f9vtn\") pod \"dnsmasq-dns-57d769cc4f-tmt9k\" (UID: \"8ed908e0-5c5b-4246-9192-17d4b995650f\") " pod="openstack/dnsmasq-dns-57d769cc4f-tmt9k"
Oct 02 11:10:47 crc kubenswrapper[4783]: I1002 11:10:47.111875 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-tmt9k"
Oct 02 11:10:47 crc kubenswrapper[4783]: I1002 11:10:47.426354 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Oct 02 11:10:47 crc kubenswrapper[4783]: I1002 11:10:47.447795 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Oct 02 11:10:47 crc kubenswrapper[4783]: I1002 11:10:47.453601 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user"
Oct 02 11:10:47 crc kubenswrapper[4783]: I1002 11:10:47.454440 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie"
Oct 02 11:10:47 crc kubenswrapper[4783]: I1002 11:10:47.454559 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data"
Oct 02 11:10:47 crc kubenswrapper[4783]: I1002 11:10:47.454599 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc"
Oct 02 11:10:47 crc kubenswrapper[4783]: I1002 11:10:47.454705 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf"
Oct 02 11:10:47 crc kubenswrapper[4783]: I1002 11:10:47.454897 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-mb4cz"
Oct 02 11:10:47 crc kubenswrapper[4783]: I1002 11:10:47.455055 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf"
Oct 02 11:10:47 crc kubenswrapper[4783]: I1002 11:10:47.455836 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Oct 02 11:10:47 crc kubenswrapper[4783]: I1002 11:10:47.582058 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-6gwjh"]
Oct 02 11:10:47 crc kubenswrapper[4783]: I1002 11:10:47.612596 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/3b3b5c94-1a3b-4486-9247-724deab20d81-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b3b5c94-1a3b-4486-9247-724deab20d81\") " pod="openstack/rabbitmq-cell1-server-0"
Oct 02 11:10:47 crc kubenswrapper[4783]: I1002 11:10:47.612649 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/3b3b5c94-1a3b-4486-9247-724deab20d81-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b3b5c94-1a3b-4486-9247-724deab20d81\") " pod="openstack/rabbitmq-cell1-server-0"
Oct 02 11:10:47 crc kubenswrapper[4783]: I1002 11:10:47.612671 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/3b3b5c94-1a3b-4486-9247-724deab20d81-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b3b5c94-1a3b-4486-9247-724deab20d81\") " pod="openstack/rabbitmq-cell1-server-0"
Oct 02 11:10:47 crc kubenswrapper[4783]: I1002 11:10:47.612690 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/3b3b5c94-1a3b-4486-9247-724deab20d81-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b3b5c94-1a3b-4486-9247-724deab20d81\") " pod="openstack/rabbitmq-cell1-server-0"
Oct 02 11:10:47 crc kubenswrapper[4783]: I1002 11:10:47.612737 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/3b3b5c94-1a3b-4486-9247-724deab20d81-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b3b5c94-1a3b-4486-9247-724deab20d81\") " pod="openstack/rabbitmq-cell1-server-0"
Oct 02 11:10:47 crc kubenswrapper[4783]: I1002 11:10:47.612752 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/3b3b5c94-1a3b-4486-9247-724deab20d81-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b3b5c94-1a3b-4486-9247-724deab20d81\") " pod="openstack/rabbitmq-cell1-server-0"
Oct 02 11:10:47 crc kubenswrapper[4783]: I1002 11:10:47.612779 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nvdtg\" (UniqueName: \"kubernetes.io/projected/3b3b5c94-1a3b-4486-9247-724deab20d81-kube-api-access-nvdtg\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b3b5c94-1a3b-4486-9247-724deab20d81\") " pod="openstack/rabbitmq-cell1-server-0"
Oct 02 11:10:47 crc kubenswrapper[4783]: I1002 11:10:47.612821 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b3b5c94-1a3b-4486-9247-724deab20d81\") " pod="openstack/rabbitmq-cell1-server-0"
Oct 02 11:10:47 crc kubenswrapper[4783]: I1002 11:10:47.612838 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3b3b5c94-1a3b-4486-9247-724deab20d81-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b3b5c94-1a3b-4486-9247-724deab20d81\") " pod="openstack/rabbitmq-cell1-server-0"
Oct 02 11:10:47 crc kubenswrapper[4783]: I1002 11:10:47.612904 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/3b3b5c94-1a3b-4486-9247-724deab20d81-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b3b5c94-1a3b-4486-9247-724deab20d81\") " pod="openstack/rabbitmq-cell1-server-0"
Oct 02 11:10:47 crc kubenswrapper[4783]: I1002 11:10:47.612936 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/3b3b5c94-1a3b-4486-9247-724deab20d81-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b3b5c94-1a3b-4486-9247-724deab20d81\") " pod="openstack/rabbitmq-cell1-server-0"
Oct 02 11:10:47 crc kubenswrapper[4783]: W1002 11:10:47.637233 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod897fe4b3_a755_4281_8f7d_c64d3551f950.slice/crio-08223a1c44f187fc6f1697c463d50898995d33c1665f078884ae7f59d9621608 WatchSource:0}: Error finding container 08223a1c44f187fc6f1697c463d50898995d33c1665f078884ae7f59d9621608: Status 404 returned error can't find the container with id 08223a1c44f187fc6f1697c463d50898995d33c1665f078884ae7f59d9621608
Oct 02 11:10:47 crc kubenswrapper[4783]: I1002 11:10:47.714359 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/3b3b5c94-1a3b-4486-9247-724deab20d81-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b3b5c94-1a3b-4486-9247-724deab20d81\") " pod="openstack/rabbitmq-cell1-server-0"
Oct 02 11:10:47 crc kubenswrapper[4783]: I1002 11:10:47.714425 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/3b3b5c94-1a3b-4486-9247-724deab20d81-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b3b5c94-1a3b-4486-9247-724deab20d81\") " pod="openstack/rabbitmq-cell1-server-0"
Oct 02 11:10:47 crc kubenswrapper[4783]: I1002 11:10:47.714440 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/3b3b5c94-1a3b-4486-9247-724deab20d81-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b3b5c94-1a3b-4486-9247-724deab20d81\") " pod="openstack/rabbitmq-cell1-server-0"
Oct 02 11:10:47 crc kubenswrapper[4783]: I1002 11:10:47.714462 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nvdtg\" (UniqueName: \"kubernetes.io/projected/3b3b5c94-1a3b-4486-9247-724deab20d81-kube-api-access-nvdtg\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b3b5c94-1a3b-4486-9247-724deab20d81\") " pod="openstack/rabbitmq-cell1-server-0"
Oct 02 11:10:47 crc kubenswrapper[4783]: I1002 11:10:47.715161 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b3b5c94-1a3b-4486-9247-724deab20d81\") " pod="openstack/rabbitmq-cell1-server-0"
Oct 02 11:10:47 crc kubenswrapper[4783]: I1002 11:10:47.715181 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3b3b5c94-1a3b-4486-9247-724deab20d81-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b3b5c94-1a3b-4486-9247-724deab20d81\") " pod="openstack/rabbitmq-cell1-server-0"
Oct 02 11:10:47 crc kubenswrapper[4783]: I1002 11:10:47.715481 4783 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b3b5c94-1a3b-4486-9247-724deab20d81\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/rabbitmq-cell1-server-0"
Oct 02 11:10:47 crc kubenswrapper[4783]: I1002 11:10:47.715766 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/3b3b5c94-1a3b-4486-9247-724deab20d81-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b3b5c94-1a3b-4486-9247-724deab20d81\") " pod="openstack/rabbitmq-cell1-server-0"
11:10:47 crc kubenswrapper[4783]: I1002 11:10:47.715766 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/3b3b5c94-1a3b-4486-9247-724deab20d81-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b3b5c94-1a3b-4486-9247-724deab20d81\") " pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:10:47 crc kubenswrapper[4783]: I1002 11:10:47.715852 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/3b3b5c94-1a3b-4486-9247-724deab20d81-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b3b5c94-1a3b-4486-9247-724deab20d81\") " pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:10:47 crc kubenswrapper[4783]: I1002 11:10:47.715932 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/3b3b5c94-1a3b-4486-9247-724deab20d81-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b3b5c94-1a3b-4486-9247-724deab20d81\") " pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:10:47 crc kubenswrapper[4783]: I1002 11:10:47.715982 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/3b3b5c94-1a3b-4486-9247-724deab20d81-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b3b5c94-1a3b-4486-9247-724deab20d81\") " pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:10:47 crc kubenswrapper[4783]: I1002 11:10:47.716002 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/3b3b5c94-1a3b-4486-9247-724deab20d81-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b3b5c94-1a3b-4486-9247-724deab20d81\") " pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:10:47 crc kubenswrapper[4783]: I1002 11:10:47.716574 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3b3b5c94-1a3b-4486-9247-724deab20d81-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b3b5c94-1a3b-4486-9247-724deab20d81\") " pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:10:47 crc kubenswrapper[4783]: I1002 11:10:47.716598 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/3b3b5c94-1a3b-4486-9247-724deab20d81-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b3b5c94-1a3b-4486-9247-724deab20d81\") " pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:10:47 crc kubenswrapper[4783]: I1002 11:10:47.716772 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/3b3b5c94-1a3b-4486-9247-724deab20d81-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b3b5c94-1a3b-4486-9247-724deab20d81\") " pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:10:47 crc kubenswrapper[4783]: I1002 11:10:47.717485 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/3b3b5c94-1a3b-4486-9247-724deab20d81-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b3b5c94-1a3b-4486-9247-724deab20d81\") " pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:10:47 crc kubenswrapper[4783]: I1002 11:10:47.719313 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" 
(UniqueName: \"kubernetes.io/configmap/3b3b5c94-1a3b-4486-9247-724deab20d81-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b3b5c94-1a3b-4486-9247-724deab20d81\") " pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:10:47 crc kubenswrapper[4783]: I1002 11:10:47.722067 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/3b3b5c94-1a3b-4486-9247-724deab20d81-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b3b5c94-1a3b-4486-9247-724deab20d81\") " pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:10:47 crc kubenswrapper[4783]: I1002 11:10:47.724377 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/3b3b5c94-1a3b-4486-9247-724deab20d81-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b3b5c94-1a3b-4486-9247-724deab20d81\") " pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:10:47 crc kubenswrapper[4783]: I1002 11:10:47.727101 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/3b3b5c94-1a3b-4486-9247-724deab20d81-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b3b5c94-1a3b-4486-9247-724deab20d81\") " pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:10:47 crc kubenswrapper[4783]: I1002 11:10:47.735577 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nvdtg\" (UniqueName: \"kubernetes.io/projected/3b3b5c94-1a3b-4486-9247-724deab20d81-kube-api-access-nvdtg\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b3b5c94-1a3b-4486-9247-724deab20d81\") " pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:10:47 crc kubenswrapper[4783]: I1002 11:10:47.736019 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/3b3b5c94-1a3b-4486-9247-724deab20d81-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b3b5c94-1a3b-4486-9247-724deab20d81\") " pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:10:47 crc kubenswrapper[4783]: I1002 11:10:47.739315 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"3b3b5c94-1a3b-4486-9247-724deab20d81\") " pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:10:47 crc kubenswrapper[4783]: I1002 11:10:47.774772 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:10:47 crc kubenswrapper[4783]: I1002 11:10:47.989850 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Oct 02 11:10:47 crc kubenswrapper[4783]: I1002 11:10:47.991707 4783 util.go:30] "No sandbox for pod can be found. 
Oct 02 11:10:48 crc kubenswrapper[4783]: I1002 11:10:47.997751 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf"
Oct 02 11:10:48 crc kubenswrapper[4783]: I1002 11:10:47.997896 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc"
Oct 02 11:10:48 crc kubenswrapper[4783]: I1002 11:10:47.998022 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-9br7t"
Oct 02 11:10:48 crc kubenswrapper[4783]: I1002 11:10:47.998054 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data"
Oct 02 11:10:48 crc kubenswrapper[4783]: I1002 11:10:47.998227 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf"
Oct 02 11:10:48 crc kubenswrapper[4783]: I1002 11:10:47.998252 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user"
Oct 02 11:10:48 crc kubenswrapper[4783]: I1002 11:10:47.998227 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie"
Oct 02 11:10:48 crc kubenswrapper[4783]: I1002 11:10:48.006110 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Oct 02 11:10:48 crc kubenswrapper[4783]: I1002 11:10:48.073403 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-tmt9k"]
Oct 02 11:10:48 crc kubenswrapper[4783]: I1002 11:10:48.123135 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\") " pod="openstack/rabbitmq-server-0"
Oct 02 11:10:48 crc kubenswrapper[4783]: I1002 11:10:48.123206 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\") " pod="openstack/rabbitmq-server-0"
Oct 02 11:10:48 crc kubenswrapper[4783]: I1002 11:10:48.123233 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-pod-info\") pod \"rabbitmq-server-0\" (UID: \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\") " pod="openstack/rabbitmq-server-0"
Oct 02 11:10:48 crc kubenswrapper[4783]: I1002 11:10:48.123259 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\") " pod="openstack/rabbitmq-server-0"
Oct 02 11:10:48 crc kubenswrapper[4783]: I1002 11:10:48.123279 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-config-data\") pod \"rabbitmq-server-0\" (UID: \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\") " pod="openstack/rabbitmq-server-0"
Oct 02 11:10:48 crc kubenswrapper[4783]: I1002 11:10:48.123309 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\") " pod="openstack/rabbitmq-server-0"
Oct 02 11:10:48 crc kubenswrapper[4783]: I1002 11:10:48.123339 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\") " pod="openstack/rabbitmq-server-0"
Oct 02 11:10:48 crc kubenswrapper[4783]: I1002 11:10:48.123370 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hxxkz\" (UniqueName: \"kubernetes.io/projected/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-kube-api-access-hxxkz\") pod \"rabbitmq-server-0\" (UID: \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\") " pod="openstack/rabbitmq-server-0"
Oct 02 11:10:48 crc kubenswrapper[4783]: I1002 11:10:48.123404 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-server-conf\") pod \"rabbitmq-server-0\" (UID: \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\") " pod="openstack/rabbitmq-server-0"
Oct 02 11:10:48 crc kubenswrapper[4783]: I1002 11:10:48.123507 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"rabbitmq-server-0\" (UID: \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\") " pod="openstack/rabbitmq-server-0"
Oct 02 11:10:48 crc kubenswrapper[4783]: I1002 11:10:48.123538 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\") " pod="openstack/rabbitmq-server-0"
Oct 02 11:10:48 crc kubenswrapper[4783]: I1002 11:10:48.227702 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\") " pod="openstack/rabbitmq-server-0"
Oct 02 11:10:48 crc kubenswrapper[4783]: I1002 11:10:48.227739 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\") " pod="openstack/rabbitmq-server-0"
Oct 02 11:10:48 crc kubenswrapper[4783]: I1002 11:10:48.227754 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-pod-info\") pod \"rabbitmq-server-0\" (UID: \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\") " pod="openstack/rabbitmq-server-0"
Oct 02 11:10:48 crc kubenswrapper[4783]: I1002 11:10:48.227774 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-config-data\") pod \"rabbitmq-server-0\" (UID: \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\") " pod="openstack/rabbitmq-server-0"
Oct 02 11:10:48 crc kubenswrapper[4783]: I1002 11:10:48.227807 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\") " pod="openstack/rabbitmq-server-0"
Oct 02 11:10:48 crc kubenswrapper[4783]: I1002 11:10:48.227829 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\") " pod="openstack/rabbitmq-server-0"
Oct 02 11:10:48 crc kubenswrapper[4783]: I1002 11:10:48.227865 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hxxkz\" (UniqueName: \"kubernetes.io/projected/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-kube-api-access-hxxkz\") pod \"rabbitmq-server-0\" (UID: \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\") " pod="openstack/rabbitmq-server-0"
Oct 02 11:10:48 crc kubenswrapper[4783]: I1002 11:10:48.227889 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-server-conf\") pod \"rabbitmq-server-0\" (UID: \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\") " pod="openstack/rabbitmq-server-0"
Oct 02 11:10:48 crc kubenswrapper[4783]: I1002 11:10:48.227905 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"rabbitmq-server-0\" (UID: \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\") " pod="openstack/rabbitmq-server-0"
Oct 02 11:10:48 crc kubenswrapper[4783]: I1002 11:10:48.227922 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\") " pod="openstack/rabbitmq-server-0"
Oct 02 11:10:48 crc kubenswrapper[4783]: I1002 11:10:48.227971 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\") " pod="openstack/rabbitmq-server-0"
Oct 02 11:10:48 crc kubenswrapper[4783]: I1002 11:10:48.228947 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\") " pod="openstack/rabbitmq-server-0"
Oct 02 11:10:48 crc kubenswrapper[4783]: I1002 11:10:48.229473 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-server-conf\") pod \"rabbitmq-server-0\" (UID: \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\") " pod="openstack/rabbitmq-server-0"
Oct 02 11:10:48 crc kubenswrapper[4783]: I1002 11:10:48.230002 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\") " pod="openstack/rabbitmq-server-0"
"MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\") " pod="openstack/rabbitmq-server-0" Oct 02 11:10:48 crc kubenswrapper[4783]: I1002 11:10:48.229089 4783 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"rabbitmq-server-0\" (UID: \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/rabbitmq-server-0" Oct 02 11:10:48 crc kubenswrapper[4783]: I1002 11:10:48.234793 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\") " pod="openstack/rabbitmq-server-0" Oct 02 11:10:48 crc kubenswrapper[4783]: I1002 11:10:48.235179 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-pod-info\") pod \"rabbitmq-server-0\" (UID: \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\") " pod="openstack/rabbitmq-server-0" Oct 02 11:10:48 crc kubenswrapper[4783]: I1002 11:10:48.235984 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-config-data\") pod \"rabbitmq-server-0\" (UID: \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\") " pod="openstack/rabbitmq-server-0" Oct 02 11:10:48 crc kubenswrapper[4783]: I1002 11:10:48.236944 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\") " pod="openstack/rabbitmq-server-0" Oct 02 11:10:48 crc kubenswrapper[4783]: I1002 11:10:48.238997 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\") " pod="openstack/rabbitmq-server-0" Oct 02 11:10:48 crc kubenswrapper[4783]: I1002 11:10:48.239378 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\") " pod="openstack/rabbitmq-server-0" Oct 02 11:10:48 crc kubenswrapper[4783]: I1002 11:10:48.247674 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hxxkz\" (UniqueName: \"kubernetes.io/projected/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-kube-api-access-hxxkz\") pod \"rabbitmq-server-0\" (UID: \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\") " pod="openstack/rabbitmq-server-0" Oct 02 11:10:48 crc kubenswrapper[4783]: I1002 11:10:48.275157 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"rabbitmq-server-0\" (UID: \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\") " pod="openstack/rabbitmq-server-0" Oct 02 11:10:48 crc 
kubenswrapper[4783]: I1002 11:10:48.332706 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Oct 02 11:10:48 crc kubenswrapper[4783]: I1002 11:10:48.420067 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc8479f9-6gwjh" event={"ID":"897fe4b3-a755-4281-8f7d-c64d3551f950","Type":"ContainerStarted","Data":"08223a1c44f187fc6f1697c463d50898995d33c1665f078884ae7f59d9621608"} Oct 02 11:10:48 crc kubenswrapper[4783]: I1002 11:10:48.422022 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-tmt9k" event={"ID":"8ed908e0-5c5b-4246-9192-17d4b995650f","Type":"ContainerStarted","Data":"bd580ef726b45584f0e7e945cac141bf3a8e384b9bee9de0d3b596855333b260"} Oct 02 11:10:48 crc kubenswrapper[4783]: I1002 11:10:48.445339 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Oct 02 11:10:48 crc kubenswrapper[4783]: W1002 11:10:48.455124 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3b3b5c94_1a3b_4486_9247_724deab20d81.slice/crio-e9ed8dab9d29705b02dc4d225d69c76075ffd3e4e1d2155d74ce90bdab21ab64 WatchSource:0}: Error finding container e9ed8dab9d29705b02dc4d225d69c76075ffd3e4e1d2155d74ce90bdab21ab64: Status 404 returned error can't find the container with id e9ed8dab9d29705b02dc4d225d69c76075ffd3e4e1d2155d74ce90bdab21ab64 Oct 02 11:10:48 crc kubenswrapper[4783]: I1002 11:10:48.611526 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Oct 02 11:10:49 crc kubenswrapper[4783]: I1002 11:10:49.210550 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"] Oct 02 11:10:49 crc kubenswrapper[4783]: I1002 11:10:49.212003 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Oct 02 11:10:49 crc kubenswrapper[4783]: I1002 11:10:49.214853 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Oct 02 11:10:49 crc kubenswrapper[4783]: I1002 11:10:49.219986 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc" Oct 02 11:10:49 crc kubenswrapper[4783]: I1002 11:10:49.220263 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-9bw4p" Oct 02 11:10:49 crc kubenswrapper[4783]: I1002 11:10:49.220373 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data" Oct 02 11:10:49 crc kubenswrapper[4783]: I1002 11:10:49.220591 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts" Oct 02 11:10:49 crc kubenswrapper[4783]: I1002 11:10:49.220692 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Oct 02 11:10:49 crc kubenswrapper[4783]: I1002 11:10:49.235735 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle" Oct 02 11:10:49 crc kubenswrapper[4783]: I1002 11:10:49.343314 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/b969c99b-7cd2-413c-b9ea-4b0fc855fb66-kolla-config\") pod \"openstack-galera-0\" (UID: \"b969c99b-7cd2-413c-b9ea-4b0fc855fb66\") " pod="openstack/openstack-galera-0" Oct 02 11:10:49 crc kubenswrapper[4783]: I1002 11:10:49.343363 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/b969c99b-7cd2-413c-b9ea-4b0fc855fb66-config-data-generated\") pod \"openstack-galera-0\" (UID: \"b969c99b-7cd2-413c-b9ea-4b0fc855fb66\") " pod="openstack/openstack-galera-0" Oct 02 11:10:49 crc kubenswrapper[4783]: I1002 11:10:49.343389 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b969c99b-7cd2-413c-b9ea-4b0fc855fb66-operator-scripts\") pod \"openstack-galera-0\" (UID: \"b969c99b-7cd2-413c-b9ea-4b0fc855fb66\") " pod="openstack/openstack-galera-0" Oct 02 11:10:49 crc kubenswrapper[4783]: I1002 11:10:49.343424 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f4mf7\" (UniqueName: \"kubernetes.io/projected/b969c99b-7cd2-413c-b9ea-4b0fc855fb66-kube-api-access-f4mf7\") pod \"openstack-galera-0\" (UID: \"b969c99b-7cd2-413c-b9ea-4b0fc855fb66\") " pod="openstack/openstack-galera-0" Oct 02 11:10:49 crc kubenswrapper[4783]: I1002 11:10:49.343457 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/b969c99b-7cd2-413c-b9ea-4b0fc855fb66-secrets\") pod \"openstack-galera-0\" (UID: \"b969c99b-7cd2-413c-b9ea-4b0fc855fb66\") " pod="openstack/openstack-galera-0" Oct 02 11:10:49 crc kubenswrapper[4783]: I1002 11:10:49.343484 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b969c99b-7cd2-413c-b9ea-4b0fc855fb66-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"b969c99b-7cd2-413c-b9ea-4b0fc855fb66\") " 
pod="openstack/openstack-galera-0" Oct 02 11:10:49 crc kubenswrapper[4783]: I1002 11:10:49.343517 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/b969c99b-7cd2-413c-b9ea-4b0fc855fb66-config-data-default\") pod \"openstack-galera-0\" (UID: \"b969c99b-7cd2-413c-b9ea-4b0fc855fb66\") " pod="openstack/openstack-galera-0" Oct 02 11:10:49 crc kubenswrapper[4783]: I1002 11:10:49.343533 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-galera-0\" (UID: \"b969c99b-7cd2-413c-b9ea-4b0fc855fb66\") " pod="openstack/openstack-galera-0" Oct 02 11:10:49 crc kubenswrapper[4783]: I1002 11:10:49.343550 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/b969c99b-7cd2-413c-b9ea-4b0fc855fb66-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"b969c99b-7cd2-413c-b9ea-4b0fc855fb66\") " pod="openstack/openstack-galera-0" Oct 02 11:10:49 crc kubenswrapper[4783]: I1002 11:10:49.431494 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"3b3b5c94-1a3b-4486-9247-724deab20d81","Type":"ContainerStarted","Data":"e9ed8dab9d29705b02dc4d225d69c76075ffd3e4e1d2155d74ce90bdab21ab64"} Oct 02 11:10:49 crc kubenswrapper[4783]: I1002 11:10:49.433122 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c","Type":"ContainerStarted","Data":"acae24ec679a88645e8aecf4c76a519ee80a4c31a10a590fb4b09c36af990cdc"} Oct 02 11:10:49 crc kubenswrapper[4783]: I1002 11:10:49.449101 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/b969c99b-7cd2-413c-b9ea-4b0fc855fb66-config-data-default\") pod \"openstack-galera-0\" (UID: \"b969c99b-7cd2-413c-b9ea-4b0fc855fb66\") " pod="openstack/openstack-galera-0" Oct 02 11:10:49 crc kubenswrapper[4783]: I1002 11:10:49.449153 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-galera-0\" (UID: \"b969c99b-7cd2-413c-b9ea-4b0fc855fb66\") " pod="openstack/openstack-galera-0" Oct 02 11:10:49 crc kubenswrapper[4783]: I1002 11:10:49.449169 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/b969c99b-7cd2-413c-b9ea-4b0fc855fb66-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"b969c99b-7cd2-413c-b9ea-4b0fc855fb66\") " pod="openstack/openstack-galera-0" Oct 02 11:10:49 crc kubenswrapper[4783]: I1002 11:10:49.449215 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/b969c99b-7cd2-413c-b9ea-4b0fc855fb66-kolla-config\") pod \"openstack-galera-0\" (UID: \"b969c99b-7cd2-413c-b9ea-4b0fc855fb66\") " pod="openstack/openstack-galera-0" Oct 02 11:10:49 crc kubenswrapper[4783]: I1002 11:10:49.449236 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/b969c99b-7cd2-413c-b9ea-4b0fc855fb66-config-data-generated\") pod 
\"openstack-galera-0\" (UID: \"b969c99b-7cd2-413c-b9ea-4b0fc855fb66\") " pod="openstack/openstack-galera-0" Oct 02 11:10:49 crc kubenswrapper[4783]: I1002 11:10:49.449267 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b969c99b-7cd2-413c-b9ea-4b0fc855fb66-operator-scripts\") pod \"openstack-galera-0\" (UID: \"b969c99b-7cd2-413c-b9ea-4b0fc855fb66\") " pod="openstack/openstack-galera-0" Oct 02 11:10:49 crc kubenswrapper[4783]: I1002 11:10:49.449306 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f4mf7\" (UniqueName: \"kubernetes.io/projected/b969c99b-7cd2-413c-b9ea-4b0fc855fb66-kube-api-access-f4mf7\") pod \"openstack-galera-0\" (UID: \"b969c99b-7cd2-413c-b9ea-4b0fc855fb66\") " pod="openstack/openstack-galera-0" Oct 02 11:10:49 crc kubenswrapper[4783]: I1002 11:10:49.449333 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/b969c99b-7cd2-413c-b9ea-4b0fc855fb66-secrets\") pod \"openstack-galera-0\" (UID: \"b969c99b-7cd2-413c-b9ea-4b0fc855fb66\") " pod="openstack/openstack-galera-0" Oct 02 11:10:49 crc kubenswrapper[4783]: I1002 11:10:49.449356 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b969c99b-7cd2-413c-b9ea-4b0fc855fb66-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"b969c99b-7cd2-413c-b9ea-4b0fc855fb66\") " pod="openstack/openstack-galera-0" Oct 02 11:10:49 crc kubenswrapper[4783]: I1002 11:10:49.450296 4783 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-galera-0\" (UID: \"b969c99b-7cd2-413c-b9ea-4b0fc855fb66\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/openstack-galera-0" Oct 02 11:10:49 crc kubenswrapper[4783]: I1002 11:10:49.450551 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/b969c99b-7cd2-413c-b9ea-4b0fc855fb66-kolla-config\") pod \"openstack-galera-0\" (UID: \"b969c99b-7cd2-413c-b9ea-4b0fc855fb66\") " pod="openstack/openstack-galera-0" Oct 02 11:10:49 crc kubenswrapper[4783]: I1002 11:10:49.451279 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/b969c99b-7cd2-413c-b9ea-4b0fc855fb66-config-data-default\") pod \"openstack-galera-0\" (UID: \"b969c99b-7cd2-413c-b9ea-4b0fc855fb66\") " pod="openstack/openstack-galera-0" Oct 02 11:10:49 crc kubenswrapper[4783]: I1002 11:10:49.452370 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b969c99b-7cd2-413c-b9ea-4b0fc855fb66-operator-scripts\") pod \"openstack-galera-0\" (UID: \"b969c99b-7cd2-413c-b9ea-4b0fc855fb66\") " pod="openstack/openstack-galera-0" Oct 02 11:10:49 crc kubenswrapper[4783]: I1002 11:10:49.452892 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/b969c99b-7cd2-413c-b9ea-4b0fc855fb66-config-data-generated\") pod \"openstack-galera-0\" (UID: \"b969c99b-7cd2-413c-b9ea-4b0fc855fb66\") " pod="openstack/openstack-galera-0" Oct 02 11:10:49 crc kubenswrapper[4783]: I1002 11:10:49.453709 4783 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/b969c99b-7cd2-413c-b9ea-4b0fc855fb66-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"b969c99b-7cd2-413c-b9ea-4b0fc855fb66\") " pod="openstack/openstack-galera-0" Oct 02 11:10:49 crc kubenswrapper[4783]: I1002 11:10:49.470500 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b969c99b-7cd2-413c-b9ea-4b0fc855fb66-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"b969c99b-7cd2-413c-b9ea-4b0fc855fb66\") " pod="openstack/openstack-galera-0" Oct 02 11:10:49 crc kubenswrapper[4783]: I1002 11:10:49.472820 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/b969c99b-7cd2-413c-b9ea-4b0fc855fb66-secrets\") pod \"openstack-galera-0\" (UID: \"b969c99b-7cd2-413c-b9ea-4b0fc855fb66\") " pod="openstack/openstack-galera-0" Oct 02 11:10:49 crc kubenswrapper[4783]: I1002 11:10:49.473103 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f4mf7\" (UniqueName: \"kubernetes.io/projected/b969c99b-7cd2-413c-b9ea-4b0fc855fb66-kube-api-access-f4mf7\") pod \"openstack-galera-0\" (UID: \"b969c99b-7cd2-413c-b9ea-4b0fc855fb66\") " pod="openstack/openstack-galera-0" Oct 02 11:10:49 crc kubenswrapper[4783]: I1002 11:10:49.484498 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"openstack-galera-0\" (UID: \"b969c99b-7cd2-413c-b9ea-4b0fc855fb66\") " pod="openstack/openstack-galera-0" Oct 02 11:10:49 crc kubenswrapper[4783]: I1002 11:10:49.543675 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.070801 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.179836 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.181791 4783 util.go:30] "No sandbox for pod can be found. 
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.184491 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-frqvf"
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.188327 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data"
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.190077 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc"
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.190242 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts"
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.190758 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"]
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.364343 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/8c174112-c7e3-43b2-b794-a085b9565b90-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"8c174112-c7e3-43b2-b794-a085b9565b90\") " pod="openstack/openstack-cell1-galera-0"
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.364395 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8c174112-c7e3-43b2-b794-a085b9565b90-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"8c174112-c7e3-43b2-b794-a085b9565b90\") " pod="openstack/openstack-cell1-galera-0"
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.364427 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/8c174112-c7e3-43b2-b794-a085b9565b90-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"8c174112-c7e3-43b2-b794-a085b9565b90\") " pod="openstack/openstack-cell1-galera-0"
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.364472 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/8c174112-c7e3-43b2-b794-a085b9565b90-secrets\") pod \"openstack-cell1-galera-0\" (UID: \"8c174112-c7e3-43b2-b794-a085b9565b90\") " pod="openstack/openstack-cell1-galera-0"
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.364518 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/8c174112-c7e3-43b2-b794-a085b9565b90-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"8c174112-c7e3-43b2-b794-a085b9565b90\") " pod="openstack/openstack-cell1-galera-0"
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.364561 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c174112-c7e3-43b2-b794-a085b9565b90-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"8c174112-c7e3-43b2-b794-a085b9565b90\") " pod="openstack/openstack-cell1-galera-0"
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.364579 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-cell1-galera-0\" (UID: \"8c174112-c7e3-43b2-b794-a085b9565b90\") " pod="openstack/openstack-cell1-galera-0"
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.364597 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-67mnv\" (UniqueName: \"kubernetes.io/projected/8c174112-c7e3-43b2-b794-a085b9565b90-kube-api-access-67mnv\") pod \"openstack-cell1-galera-0\" (UID: \"8c174112-c7e3-43b2-b794-a085b9565b90\") " pod="openstack/openstack-cell1-galera-0"
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.364647 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/8c174112-c7e3-43b2-b794-a085b9565b90-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"8c174112-c7e3-43b2-b794-a085b9565b90\") " pod="openstack/openstack-cell1-galera-0"
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.461040 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"b969c99b-7cd2-413c-b9ea-4b0fc855fb66","Type":"ContainerStarted","Data":"274fb30d00843048745f5ff99543e4fea0d0cf52de9a008feff3f76859c9071b"}
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.465694 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/8c174112-c7e3-43b2-b794-a085b9565b90-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"8c174112-c7e3-43b2-b794-a085b9565b90\") " pod="openstack/openstack-cell1-galera-0"
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.465740 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8c174112-c7e3-43b2-b794-a085b9565b90-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"8c174112-c7e3-43b2-b794-a085b9565b90\") " pod="openstack/openstack-cell1-galera-0"
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.465762 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/8c174112-c7e3-43b2-b794-a085b9565b90-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"8c174112-c7e3-43b2-b794-a085b9565b90\") " pod="openstack/openstack-cell1-galera-0"
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.465816 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/8c174112-c7e3-43b2-b794-a085b9565b90-secrets\") pod \"openstack-cell1-galera-0\" (UID: \"8c174112-c7e3-43b2-b794-a085b9565b90\") " pod="openstack/openstack-cell1-galera-0"
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.465863 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/8c174112-c7e3-43b2-b794-a085b9565b90-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"8c174112-c7e3-43b2-b794-a085b9565b90\") " pod="openstack/openstack-cell1-galera-0"
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.465883 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c174112-c7e3-43b2-b794-a085b9565b90-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"8c174112-c7e3-43b2-b794-a085b9565b90\") " pod="openstack/openstack-cell1-galera-0"
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.465901 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-cell1-galera-0\" (UID: \"8c174112-c7e3-43b2-b794-a085b9565b90\") " pod="openstack/openstack-cell1-galera-0"
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.465917 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-67mnv\" (UniqueName: \"kubernetes.io/projected/8c174112-c7e3-43b2-b794-a085b9565b90-kube-api-access-67mnv\") pod \"openstack-cell1-galera-0\" (UID: \"8c174112-c7e3-43b2-b794-a085b9565b90\") " pod="openstack/openstack-cell1-galera-0"
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.465943 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/8c174112-c7e3-43b2-b794-a085b9565b90-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"8c174112-c7e3-43b2-b794-a085b9565b90\") " pod="openstack/openstack-cell1-galera-0"
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.466686 4783 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-cell1-galera-0\" (UID: \"8c174112-c7e3-43b2-b794-a085b9565b90\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/openstack-cell1-galera-0"
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.466708 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/8c174112-c7e3-43b2-b794-a085b9565b90-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"8c174112-c7e3-43b2-b794-a085b9565b90\") " pod="openstack/openstack-cell1-galera-0"
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.467890 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/8c174112-c7e3-43b2-b794-a085b9565b90-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"8c174112-c7e3-43b2-b794-a085b9565b90\") " pod="openstack/openstack-cell1-galera-0"
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.468687 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8c174112-c7e3-43b2-b794-a085b9565b90-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"8c174112-c7e3-43b2-b794-a085b9565b90\") " pod="openstack/openstack-cell1-galera-0"
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.469369 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/8c174112-c7e3-43b2-b794-a085b9565b90-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"8c174112-c7e3-43b2-b794-a085b9565b90\") " pod="openstack/openstack-cell1-galera-0"
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.496812 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8c174112-c7e3-43b2-b794-a085b9565b90-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"8c174112-c7e3-43b2-b794-a085b9565b90\") " pod="openstack/openstack-cell1-galera-0"
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.496979 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/8c174112-c7e3-43b2-b794-a085b9565b90-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"8c174112-c7e3-43b2-b794-a085b9565b90\") " pod="openstack/openstack-cell1-galera-0"
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.497135 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secrets\" (UniqueName: \"kubernetes.io/secret/8c174112-c7e3-43b2-b794-a085b9565b90-secrets\") pod \"openstack-cell1-galera-0\" (UID: \"8c174112-c7e3-43b2-b794-a085b9565b90\") " pod="openstack/openstack-cell1-galera-0"
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.507372 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"openstack-cell1-galera-0\" (UID: \"8c174112-c7e3-43b2-b794-a085b9565b90\") " pod="openstack/openstack-cell1-galera-0"
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.520321 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-67mnv\" (UniqueName: \"kubernetes.io/projected/8c174112-c7e3-43b2-b794-a085b9565b90-kube-api-access-67mnv\") pod \"openstack-cell1-galera-0\" (UID: \"8c174112-c7e3-43b2-b794-a085b9565b90\") " pod="openstack/openstack-cell1-galera-0"
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.630261 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"]
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.631207 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0"
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.633445 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-mtn25"
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.634087 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc"
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.635471 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data"
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.643080 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"]
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.770340 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f7b5af20-9931-4086-a593-8c0090ce8c12-config-data\") pod \"memcached-0\" (UID: \"f7b5af20-9931-4086-a593-8c0090ce8c12\") " pod="openstack/memcached-0"
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.770398 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t9ps9\" (UniqueName: \"kubernetes.io/projected/f7b5af20-9931-4086-a593-8c0090ce8c12-kube-api-access-t9ps9\") pod \"memcached-0\" (UID: \"f7b5af20-9931-4086-a593-8c0090ce8c12\") " pod="openstack/memcached-0"
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.770439 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/f7b5af20-9931-4086-a593-8c0090ce8c12-kolla-config\") pod \"memcached-0\" (UID: \"f7b5af20-9931-4086-a593-8c0090ce8c12\") " pod="openstack/memcached-0"
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.770568 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f7b5af20-9931-4086-a593-8c0090ce8c12-combined-ca-bundle\") pod \"memcached-0\" (UID: \"f7b5af20-9931-4086-a593-8c0090ce8c12\") " pod="openstack/memcached-0"
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.770641 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/f7b5af20-9931-4086-a593-8c0090ce8c12-memcached-tls-certs\") pod \"memcached-0\" (UID: \"f7b5af20-9931-4086-a593-8c0090ce8c12\") " pod="openstack/memcached-0"
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.817373 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0"
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.872295 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f7b5af20-9931-4086-a593-8c0090ce8c12-config-data\") pod \"memcached-0\" (UID: \"f7b5af20-9931-4086-a593-8c0090ce8c12\") " pod="openstack/memcached-0"
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.872351 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t9ps9\" (UniqueName: \"kubernetes.io/projected/f7b5af20-9931-4086-a593-8c0090ce8c12-kube-api-access-t9ps9\") pod \"memcached-0\" (UID: \"f7b5af20-9931-4086-a593-8c0090ce8c12\") " pod="openstack/memcached-0"
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.872398 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/f7b5af20-9931-4086-a593-8c0090ce8c12-kolla-config\") pod \"memcached-0\" (UID: \"f7b5af20-9931-4086-a593-8c0090ce8c12\") " pod="openstack/memcached-0"
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.872453 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f7b5af20-9931-4086-a593-8c0090ce8c12-combined-ca-bundle\") pod \"memcached-0\" (UID: \"f7b5af20-9931-4086-a593-8c0090ce8c12\") " pod="openstack/memcached-0"
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.872669 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/f7b5af20-9931-4086-a593-8c0090ce8c12-memcached-tls-certs\") pod \"memcached-0\" (UID: \"f7b5af20-9931-4086-a593-8c0090ce8c12\") " pod="openstack/memcached-0"
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.873463 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/f7b5af20-9931-4086-a593-8c0090ce8c12-kolla-config\") pod \"memcached-0\" (UID: \"f7b5af20-9931-4086-a593-8c0090ce8c12\") " pod="openstack/memcached-0"
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.876925 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f7b5af20-9931-4086-a593-8c0090ce8c12-config-data\") pod \"memcached-0\" (UID: \"f7b5af20-9931-4086-a593-8c0090ce8c12\") " pod="openstack/memcached-0"
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.881464 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f7b5af20-9931-4086-a593-8c0090ce8c12-combined-ca-bundle\") pod \"memcached-0\" (UID: \"f7b5af20-9931-4086-a593-8c0090ce8c12\") " pod="openstack/memcached-0"
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.882498 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/f7b5af20-9931-4086-a593-8c0090ce8c12-memcached-tls-certs\") pod \"memcached-0\" (UID: \"f7b5af20-9931-4086-a593-8c0090ce8c12\") " pod="openstack/memcached-0"
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.945524 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t9ps9\" (UniqueName: \"kubernetes.io/projected/f7b5af20-9931-4086-a593-8c0090ce8c12-kube-api-access-t9ps9\") pod \"memcached-0\" (UID: \"f7b5af20-9931-4086-a593-8c0090ce8c12\") " pod="openstack/memcached-0"
Oct 02 11:10:50 crc kubenswrapper[4783]: I1002 11:10:50.962971 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0"
Oct 02 11:10:51 crc kubenswrapper[4783]: I1002 11:10:51.308933 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"]
Oct 02 11:10:51 crc kubenswrapper[4783]: W1002 11:10:51.311395 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8c174112_c7e3_43b2_b794_a085b9565b90.slice/crio-db6e09a49fe7cca58ee2d19e8539d111635ae9d9424d62be8e67a3b39824bd5f WatchSource:0}: Error finding container db6e09a49fe7cca58ee2d19e8539d111635ae9d9424d62be8e67a3b39824bd5f: Status 404 returned error can't find the container with id db6e09a49fe7cca58ee2d19e8539d111635ae9d9424d62be8e67a3b39824bd5f
Oct 02 11:10:51 crc kubenswrapper[4783]: I1002 11:10:51.469621 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"8c174112-c7e3-43b2-b794-a085b9565b90","Type":"ContainerStarted","Data":"db6e09a49fe7cca58ee2d19e8539d111635ae9d9424d62be8e67a3b39824bd5f"}
Oct 02 11:10:51 crc kubenswrapper[4783]: I1002 11:10:51.498787 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"]
Oct 02 11:10:51 crc kubenswrapper[4783]: W1002 11:10:51.502868 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf7b5af20_9931_4086_a593_8c0090ce8c12.slice/crio-a8aef7a7aff27eb41718b8b817228dfc0782b6c6f1e30fbcb5adc6d881b5da44 WatchSource:0}: Error finding container a8aef7a7aff27eb41718b8b817228dfc0782b6c6f1e30fbcb5adc6d881b5da44: Status 404 returned error can't find the container with id a8aef7a7aff27eb41718b8b817228dfc0782b6c6f1e30fbcb5adc6d881b5da44
Oct 02 11:10:51 crc kubenswrapper[4783]: I1002 11:10:51.513385 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Oct 02 11:10:51 crc kubenswrapper[4783]: I1002 11:10:51.513441 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 02 11:10:52 crc kubenswrapper[4783]: I1002 11:10:52.316215 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"]
Oct 02 11:10:52 crc kubenswrapper[4783]: I1002 11:10:52.317548 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Oct 02 11:10:52 crc kubenswrapper[4783]: I1002 11:10:52.322267 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-prp4f"
Oct 02 11:10:52 crc kubenswrapper[4783]: I1002 11:10:52.330355 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"]
Oct 02 11:10:52 crc kubenswrapper[4783]: I1002 11:10:52.416546 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-242pt\" (UniqueName: \"kubernetes.io/projected/9d4e10e2-7f87-4ffc-9120-fa41d978cb4f-kube-api-access-242pt\") pod \"kube-state-metrics-0\" (UID: \"9d4e10e2-7f87-4ffc-9120-fa41d978cb4f\") " pod="openstack/kube-state-metrics-0"
Oct 02 11:10:52 crc kubenswrapper[4783]: I1002 11:10:52.477961 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"f7b5af20-9931-4086-a593-8c0090ce8c12","Type":"ContainerStarted","Data":"a8aef7a7aff27eb41718b8b817228dfc0782b6c6f1e30fbcb5adc6d881b5da44"}
Oct 02 11:10:52 crc kubenswrapper[4783]: I1002 11:10:52.518324 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-242pt\" (UniqueName: \"kubernetes.io/projected/9d4e10e2-7f87-4ffc-9120-fa41d978cb4f-kube-api-access-242pt\") pod \"kube-state-metrics-0\" (UID: \"9d4e10e2-7f87-4ffc-9120-fa41d978cb4f\") " pod="openstack/kube-state-metrics-0"
Oct 02 11:10:52 crc kubenswrapper[4783]: I1002 11:10:52.547024 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-242pt\" (UniqueName: \"kubernetes.io/projected/9d4e10e2-7f87-4ffc-9120-fa41d978cb4f-kube-api-access-242pt\") pod \"kube-state-metrics-0\" (UID: \"9d4e10e2-7f87-4ffc-9120-fa41d978cb4f\") " pod="openstack/kube-state-metrics-0"
Oct 02 11:10:52 crc kubenswrapper[4783]: I1002 11:10:52.638335 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0"
Oct 02 11:10:53 crc kubenswrapper[4783]: I1002 11:10:53.266064 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"]
Oct 02 11:10:53 crc kubenswrapper[4783]: I1002 11:10:53.487169 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"9d4e10e2-7f87-4ffc-9120-fa41d978cb4f","Type":"ContainerStarted","Data":"049e5d36d5768c1aca3f4c8aeb249c2b619c572969e74961f4700d2ae2402018"}
Oct 02 11:10:57 crc kubenswrapper[4783]: I1002 11:10:57.185703 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-k7v6f"]
Oct 02 11:10:57 crc kubenswrapper[4783]: I1002 11:10:57.187595 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-k7v6f"
Oct 02 11:10:57 crc kubenswrapper[4783]: I1002 11:10:57.189774 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs"
Oct 02 11:10:57 crc kubenswrapper[4783]: I1002 11:10:57.189978 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-xvtlj"
Oct 02 11:10:57 crc kubenswrapper[4783]: I1002 11:10:57.190172 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts"
Oct 02 11:10:57 crc kubenswrapper[4783]: I1002 11:10:57.196324 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-p5v7m"]
Oct 02 11:10:57 crc kubenswrapper[4783]: I1002 11:10:57.197811 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-p5v7m"
Oct 02 11:10:57 crc kubenswrapper[4783]: I1002 11:10:57.217959 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-k7v6f"]
Oct 02 11:10:57 crc kubenswrapper[4783]: I1002 11:10:57.224782 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-p5v7m"]
Oct 02 11:10:57 crc kubenswrapper[4783]: I1002 11:10:57.307858 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/c9fa791a-e505-43ab-a361-897b21f24f89-ovn-controller-tls-certs\") pod \"ovn-controller-k7v6f\" (UID: \"c9fa791a-e505-43ab-a361-897b21f24f89\") " pod="openstack/ovn-controller-k7v6f"
Oct 02 11:10:57 crc kubenswrapper[4783]: I1002 11:10:57.307902 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/c9fa791a-e505-43ab-a361-897b21f24f89-var-log-ovn\") pod \"ovn-controller-k7v6f\" (UID: \"c9fa791a-e505-43ab-a361-897b21f24f89\") " pod="openstack/ovn-controller-k7v6f"
Oct 02 11:10:57 crc kubenswrapper[4783]: I1002 11:10:57.307924 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/246fea45-d93e-4975-bc2e-818ec7eafa99-var-lib\") pod \"ovn-controller-ovs-p5v7m\" (UID: \"246fea45-d93e-4975-bc2e-818ec7eafa99\") " pod="openstack/ovn-controller-ovs-p5v7m"
Oct 02 11:10:57 crc kubenswrapper[4783]: I1002 11:10:57.307954 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/246fea45-d93e-4975-bc2e-818ec7eafa99-scripts\") pod \"ovn-controller-ovs-p5v7m\" (UID: \"246fea45-d93e-4975-bc2e-818ec7eafa99\") " pod="openstack/ovn-controller-ovs-p5v7m"
Oct 02 11:10:57 crc kubenswrapper[4783]: I1002 11:10:57.307972 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c9fa791a-e505-43ab-a361-897b21f24f89-var-run\") pod \"ovn-controller-k7v6f\" (UID: \"c9fa791a-e505-43ab-a361-897b21f24f89\") " pod="openstack/ovn-controller-k7v6f"
Oct 02 11:10:57 crc kubenswrapper[4783]: I1002 11:10:57.308020 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9fa791a-e505-43ab-a361-897b21f24f89-combined-ca-bundle\") pod \"ovn-controller-k7v6f\" (UID: \"c9fa791a-e505-43ab-a361-897b21f24f89\") " pod="openstack/ovn-controller-k7v6f"
Oct 02 11:10:57 crc kubenswrapper[4783]: I1002 11:10:57.308039 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/246fea45-d93e-4975-bc2e-818ec7eafa99-etc-ovs\") pod \"ovn-controller-ovs-p5v7m\" (UID: \"246fea45-d93e-4975-bc2e-818ec7eafa99\") " pod="openstack/ovn-controller-ovs-p5v7m"
Oct 02 11:10:57 crc kubenswrapper[4783]: I1002 11:10:57.308060 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/c9fa791a-e505-43ab-a361-897b21f24f89-var-run-ovn\") pod \"ovn-controller-k7v6f\" (UID: \"c9fa791a-e505-43ab-a361-897b21f24f89\") " pod="openstack/ovn-controller-k7v6f"
Oct 02 11:10:57 crc kubenswrapper[4783]: I1002 11:10:57.308079 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/246fea45-d93e-4975-bc2e-818ec7eafa99-var-run\") pod \"ovn-controller-ovs-p5v7m\" (UID: \"246fea45-d93e-4975-bc2e-818ec7eafa99\") " pod="openstack/ovn-controller-ovs-p5v7m"
Oct 02 11:10:57 crc kubenswrapper[4783]: I1002 11:10:57.308101 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/246fea45-d93e-4975-bc2e-818ec7eafa99-var-log\") pod \"ovn-controller-ovs-p5v7m\" (UID: \"246fea45-d93e-4975-bc2e-818ec7eafa99\") " pod="openstack/ovn-controller-ovs-p5v7m"
Oct 02 11:10:57 crc kubenswrapper[4783]: I1002 11:10:57.308121 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c9fa791a-e505-43ab-a361-897b21f24f89-scripts\") pod \"ovn-controller-k7v6f\" (UID: \"c9fa791a-e505-43ab-a361-897b21f24f89\") " pod="openstack/ovn-controller-k7v6f"
Oct 02 11:10:57 crc kubenswrapper[4783]: I1002 11:10:57.308184 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xsm4b\" (UniqueName: \"kubernetes.io/projected/c9fa791a-e505-43ab-a361-897b21f24f89-kube-api-access-xsm4b\") pod \"ovn-controller-k7v6f\" (UID: \"c9fa791a-e505-43ab-a361-897b21f24f89\") " pod="openstack/ovn-controller-k7v6f"
Oct 02 11:10:57 crc kubenswrapper[4783]: I1002 11:10:57.308204 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6f25x\" (UniqueName: \"kubernetes.io/projected/246fea45-d93e-4975-bc2e-818ec7eafa99-kube-api-access-6f25x\") pod \"ovn-controller-ovs-p5v7m\" (UID: \"246fea45-d93e-4975-bc2e-818ec7eafa99\") " pod="openstack/ovn-controller-ovs-p5v7m"
Oct 02 11:10:57 crc kubenswrapper[4783]: I1002 11:10:57.409843 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/c9fa791a-e505-43ab-a361-897b21f24f89-ovn-controller-tls-certs\") pod \"ovn-controller-k7v6f\" (UID: \"c9fa791a-e505-43ab-a361-897b21f24f89\") " pod="openstack/ovn-controller-k7v6f"
Oct 02 11:10:57 crc kubenswrapper[4783]: I1002 11:10:57.409876 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/c9fa791a-e505-43ab-a361-897b21f24f89-var-log-ovn\") pod \"ovn-controller-k7v6f\" (UID: \"c9fa791a-e505-43ab-a361-897b21f24f89\") " pod="openstack/ovn-controller-k7v6f"
Oct 02 11:10:57 crc kubenswrapper[4783]: I1002 11:10:57.409897 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/246fea45-d93e-4975-bc2e-818ec7eafa99-var-lib\") pod \"ovn-controller-ovs-p5v7m\" (UID: \"246fea45-d93e-4975-bc2e-818ec7eafa99\") " pod="openstack/ovn-controller-ovs-p5v7m"
Oct 02 11:10:57 crc kubenswrapper[4783]: I1002 11:10:57.409924 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/246fea45-d93e-4975-bc2e-818ec7eafa99-scripts\") pod \"ovn-controller-ovs-p5v7m\" (UID: \"246fea45-d93e-4975-bc2e-818ec7eafa99\") " pod="openstack/ovn-controller-ovs-p5v7m"
Oct 02 11:10:57 crc kubenswrapper[4783]: I1002 11:10:57.410020 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c9fa791a-e505-43ab-a361-897b21f24f89-var-run\") pod \"ovn-controller-k7v6f\" (UID: \"c9fa791a-e505-43ab-a361-897b21f24f89\") " pod="openstack/ovn-controller-k7v6f"
Oct 02 11:10:57 crc kubenswrapper[4783]: I1002 11:10:57.410054 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9fa791a-e505-43ab-a361-897b21f24f89-combined-ca-bundle\") pod \"ovn-controller-k7v6f\" (UID: \"c9fa791a-e505-43ab-a361-897b21f24f89\") " pod="openstack/ovn-controller-k7v6f"
Oct 02 11:10:57 crc kubenswrapper[4783]: I1002 11:10:57.410071 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/246fea45-d93e-4975-bc2e-818ec7eafa99-etc-ovs\") pod \"ovn-controller-ovs-p5v7m\" (UID: \"246fea45-d93e-4975-bc2e-818ec7eafa99\") " pod="openstack/ovn-controller-ovs-p5v7m"
Oct 02 11:10:57 crc kubenswrapper[4783]: I1002 11:10:57.410091 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/c9fa791a-e505-43ab-a361-897b21f24f89-var-run-ovn\") pod \"ovn-controller-k7v6f\" (UID: \"c9fa791a-e505-43ab-a361-897b21f24f89\") " pod="openstack/ovn-controller-k7v6f"
Oct 02 11:10:57 crc kubenswrapper[4783]: I1002 11:10:57.410112 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/246fea45-d93e-4975-bc2e-818ec7eafa99-var-run\") pod \"ovn-controller-ovs-p5v7m\" (UID: \"246fea45-d93e-4975-bc2e-818ec7eafa99\") " pod="openstack/ovn-controller-ovs-p5v7m"
Oct 02 11:10:57 crc kubenswrapper[4783]: I1002 11:10:57.410131 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/246fea45-d93e-4975-bc2e-818ec7eafa99-var-log\") pod \"ovn-controller-ovs-p5v7m\" (UID: \"246fea45-d93e-4975-bc2e-818ec7eafa99\") " pod="openstack/ovn-controller-ovs-p5v7m"
Oct 02 11:10:57 crc kubenswrapper[4783]: I1002 11:10:57.410148 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c9fa791a-e505-43ab-a361-897b21f24f89-scripts\") pod \"ovn-controller-k7v6f\" (UID: \"c9fa791a-e505-43ab-a361-897b21f24f89\") " pod="openstack/ovn-controller-k7v6f"
Oct 02 11:10:57 crc kubenswrapper[4783]: I1002 11:10:57.410185 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xsm4b\" (UniqueName: \"kubernetes.io/projected/c9fa791a-e505-43ab-a361-897b21f24f89-kube-api-access-xsm4b\")
pod \"ovn-controller-k7v6f\" (UID: \"c9fa791a-e505-43ab-a361-897b21f24f89\") " pod="openstack/ovn-controller-k7v6f" Oct 02 11:10:57 crc kubenswrapper[4783]: I1002 11:10:57.410201 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6f25x\" (UniqueName: \"kubernetes.io/projected/246fea45-d93e-4975-bc2e-818ec7eafa99-kube-api-access-6f25x\") pod \"ovn-controller-ovs-p5v7m\" (UID: \"246fea45-d93e-4975-bc2e-818ec7eafa99\") " pod="openstack/ovn-controller-ovs-p5v7m" Oct 02 11:10:57 crc kubenswrapper[4783]: I1002 11:10:57.411053 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/246fea45-d93e-4975-bc2e-818ec7eafa99-var-lib\") pod \"ovn-controller-ovs-p5v7m\" (UID: \"246fea45-d93e-4975-bc2e-818ec7eafa99\") " pod="openstack/ovn-controller-ovs-p5v7m" Oct 02 11:10:57 crc kubenswrapper[4783]: I1002 11:10:57.411147 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/c9fa791a-e505-43ab-a361-897b21f24f89-var-log-ovn\") pod \"ovn-controller-k7v6f\" (UID: \"c9fa791a-e505-43ab-a361-897b21f24f89\") " pod="openstack/ovn-controller-k7v6f" Oct 02 11:10:57 crc kubenswrapper[4783]: I1002 11:10:57.412130 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/246fea45-d93e-4975-bc2e-818ec7eafa99-var-run\") pod \"ovn-controller-ovs-p5v7m\" (UID: \"246fea45-d93e-4975-bc2e-818ec7eafa99\") " pod="openstack/ovn-controller-ovs-p5v7m" Oct 02 11:10:57 crc kubenswrapper[4783]: I1002 11:10:57.412142 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/246fea45-d93e-4975-bc2e-818ec7eafa99-var-log\") pod \"ovn-controller-ovs-p5v7m\" (UID: \"246fea45-d93e-4975-bc2e-818ec7eafa99\") " pod="openstack/ovn-controller-ovs-p5v7m" Oct 02 11:10:57 crc kubenswrapper[4783]: I1002 11:10:57.412181 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/246fea45-d93e-4975-bc2e-818ec7eafa99-etc-ovs\") pod \"ovn-controller-ovs-p5v7m\" (UID: \"246fea45-d93e-4975-bc2e-818ec7eafa99\") " pod="openstack/ovn-controller-ovs-p5v7m" Oct 02 11:10:57 crc kubenswrapper[4783]: I1002 11:10:57.412145 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/c9fa791a-e505-43ab-a361-897b21f24f89-var-run\") pod \"ovn-controller-k7v6f\" (UID: \"c9fa791a-e505-43ab-a361-897b21f24f89\") " pod="openstack/ovn-controller-k7v6f" Oct 02 11:10:57 crc kubenswrapper[4783]: I1002 11:10:57.413074 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/c9fa791a-e505-43ab-a361-897b21f24f89-var-run-ovn\") pod \"ovn-controller-k7v6f\" (UID: \"c9fa791a-e505-43ab-a361-897b21f24f89\") " pod="openstack/ovn-controller-k7v6f" Oct 02 11:10:57 crc kubenswrapper[4783]: I1002 11:10:57.414001 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/c9fa791a-e505-43ab-a361-897b21f24f89-scripts\") pod \"ovn-controller-k7v6f\" (UID: \"c9fa791a-e505-43ab-a361-897b21f24f89\") " pod="openstack/ovn-controller-k7v6f" Oct 02 11:10:57 crc kubenswrapper[4783]: I1002 11:10:57.414925 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/246fea45-d93e-4975-bc2e-818ec7eafa99-scripts\") pod \"ovn-controller-ovs-p5v7m\" (UID: \"246fea45-d93e-4975-bc2e-818ec7eafa99\") " pod="openstack/ovn-controller-ovs-p5v7m" Oct 02 11:10:57 crc kubenswrapper[4783]: I1002 11:10:57.417005 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/c9fa791a-e505-43ab-a361-897b21f24f89-ovn-controller-tls-certs\") pod \"ovn-controller-k7v6f\" (UID: \"c9fa791a-e505-43ab-a361-897b21f24f89\") " pod="openstack/ovn-controller-k7v6f" Oct 02 11:10:57 crc kubenswrapper[4783]: I1002 11:10:57.420980 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c9fa791a-e505-43ab-a361-897b21f24f89-combined-ca-bundle\") pod \"ovn-controller-k7v6f\" (UID: \"c9fa791a-e505-43ab-a361-897b21f24f89\") " pod="openstack/ovn-controller-k7v6f" Oct 02 11:10:57 crc kubenswrapper[4783]: I1002 11:10:57.425248 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6f25x\" (UniqueName: \"kubernetes.io/projected/246fea45-d93e-4975-bc2e-818ec7eafa99-kube-api-access-6f25x\") pod \"ovn-controller-ovs-p5v7m\" (UID: \"246fea45-d93e-4975-bc2e-818ec7eafa99\") " pod="openstack/ovn-controller-ovs-p5v7m" Oct 02 11:10:57 crc kubenswrapper[4783]: I1002 11:10:57.437265 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xsm4b\" (UniqueName: \"kubernetes.io/projected/c9fa791a-e505-43ab-a361-897b21f24f89-kube-api-access-xsm4b\") pod \"ovn-controller-k7v6f\" (UID: \"c9fa791a-e505-43ab-a361-897b21f24f89\") " pod="openstack/ovn-controller-k7v6f" Oct 02 11:10:57 crc kubenswrapper[4783]: I1002 11:10:57.514141 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-k7v6f" Oct 02 11:10:57 crc kubenswrapper[4783]: I1002 11:10:57.522620 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-p5v7m" Oct 02 11:10:58 crc kubenswrapper[4783]: I1002 11:10:58.087793 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Oct 02 11:10:58 crc kubenswrapper[4783]: I1002 11:10:58.092899 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Oct 02 11:10:58 crc kubenswrapper[4783]: I1002 11:10:58.096289 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-fdzk8" Oct 02 11:10:58 crc kubenswrapper[4783]: I1002 11:10:58.096530 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Oct 02 11:10:58 crc kubenswrapper[4783]: I1002 11:10:58.096651 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Oct 02 11:10:58 crc kubenswrapper[4783]: I1002 11:10:58.096771 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Oct 02 11:10:58 crc kubenswrapper[4783]: I1002 11:10:58.097116 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Oct 02 11:10:58 crc kubenswrapper[4783]: I1002 11:10:58.104263 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Oct 02 11:10:58 crc kubenswrapper[4783]: I1002 11:10:58.220353 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-69vhd\" (UniqueName: \"kubernetes.io/projected/f0a8aca8-fc00-4c0a-82b5-63fc50672f72-kube-api-access-69vhd\") pod \"ovsdbserver-sb-0\" (UID: \"f0a8aca8-fc00-4c0a-82b5-63fc50672f72\") " pod="openstack/ovsdbserver-sb-0" Oct 02 11:10:58 crc kubenswrapper[4783]: I1002 11:10:58.220528 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0a8aca8-fc00-4c0a-82b5-63fc50672f72-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"f0a8aca8-fc00-4c0a-82b5-63fc50672f72\") " pod="openstack/ovsdbserver-sb-0" Oct 02 11:10:58 crc kubenswrapper[4783]: I1002 11:10:58.220560 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f0a8aca8-fc00-4c0a-82b5-63fc50672f72-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"f0a8aca8-fc00-4c0a-82b5-63fc50672f72\") " pod="openstack/ovsdbserver-sb-0" Oct 02 11:10:58 crc kubenswrapper[4783]: I1002 11:10:58.220612 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/f0a8aca8-fc00-4c0a-82b5-63fc50672f72-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"f0a8aca8-fc00-4c0a-82b5-63fc50672f72\") " pod="openstack/ovsdbserver-sb-0" Oct 02 11:10:58 crc kubenswrapper[4783]: I1002 11:10:58.220642 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f0a8aca8-fc00-4c0a-82b5-63fc50672f72-config\") pod \"ovsdbserver-sb-0\" (UID: \"f0a8aca8-fc00-4c0a-82b5-63fc50672f72\") " pod="openstack/ovsdbserver-sb-0" Oct 02 11:10:58 crc kubenswrapper[4783]: I1002 11:10:58.220663 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/f0a8aca8-fc00-4c0a-82b5-63fc50672f72-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"f0a8aca8-fc00-4c0a-82b5-63fc50672f72\") " pod="openstack/ovsdbserver-sb-0" Oct 02 11:10:58 crc kubenswrapper[4783]: I1002 11:10:58.220685 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/f0a8aca8-fc00-4c0a-82b5-63fc50672f72-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"f0a8aca8-fc00-4c0a-82b5-63fc50672f72\") " pod="openstack/ovsdbserver-sb-0" Oct 02 11:10:58 crc kubenswrapper[4783]: I1002 11:10:58.220709 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"ovsdbserver-sb-0\" (UID: \"f0a8aca8-fc00-4c0a-82b5-63fc50672f72\") " pod="openstack/ovsdbserver-sb-0" Oct 02 11:10:58 crc kubenswrapper[4783]: I1002 11:10:58.322007 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/f0a8aca8-fc00-4c0a-82b5-63fc50672f72-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"f0a8aca8-fc00-4c0a-82b5-63fc50672f72\") " pod="openstack/ovsdbserver-sb-0" Oct 02 11:10:58 crc kubenswrapper[4783]: I1002 11:10:58.322274 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/f0a8aca8-fc00-4c0a-82b5-63fc50672f72-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"f0a8aca8-fc00-4c0a-82b5-63fc50672f72\") " pod="openstack/ovsdbserver-sb-0" Oct 02 11:10:58 crc kubenswrapper[4783]: I1002 11:10:58.322295 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f0a8aca8-fc00-4c0a-82b5-63fc50672f72-config\") pod \"ovsdbserver-sb-0\" (UID: \"f0a8aca8-fc00-4c0a-82b5-63fc50672f72\") " pod="openstack/ovsdbserver-sb-0" Oct 02 11:10:58 crc kubenswrapper[4783]: I1002 11:10:58.322312 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/f0a8aca8-fc00-4c0a-82b5-63fc50672f72-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"f0a8aca8-fc00-4c0a-82b5-63fc50672f72\") " pod="openstack/ovsdbserver-sb-0" Oct 02 11:10:58 crc kubenswrapper[4783]: I1002 11:10:58.322334 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"ovsdbserver-sb-0\" (UID: \"f0a8aca8-fc00-4c0a-82b5-63fc50672f72\") " pod="openstack/ovsdbserver-sb-0" Oct 02 11:10:58 crc kubenswrapper[4783]: I1002 11:10:58.322750 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-69vhd\" (UniqueName: \"kubernetes.io/projected/f0a8aca8-fc00-4c0a-82b5-63fc50672f72-kube-api-access-69vhd\") pod \"ovsdbserver-sb-0\" (UID: \"f0a8aca8-fc00-4c0a-82b5-63fc50672f72\") " pod="openstack/ovsdbserver-sb-0" Oct 02 11:10:58 crc kubenswrapper[4783]: I1002 11:10:58.322793 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0a8aca8-fc00-4c0a-82b5-63fc50672f72-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"f0a8aca8-fc00-4c0a-82b5-63fc50672f72\") " pod="openstack/ovsdbserver-sb-0" Oct 02 11:10:58 crc kubenswrapper[4783]: I1002 11:10:58.322811 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f0a8aca8-fc00-4c0a-82b5-63fc50672f72-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"f0a8aca8-fc00-4c0a-82b5-63fc50672f72\") " pod="openstack/ovsdbserver-sb-0" Oct 02 11:10:58 crc kubenswrapper[4783]: I1002 
11:10:58.322856 4783 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"ovsdbserver-sb-0\" (UID: \"f0a8aca8-fc00-4c0a-82b5-63fc50672f72\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/ovsdbserver-sb-0" Oct 02 11:10:58 crc kubenswrapper[4783]: I1002 11:10:58.323351 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/f0a8aca8-fc00-4c0a-82b5-63fc50672f72-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"f0a8aca8-fc00-4c0a-82b5-63fc50672f72\") " pod="openstack/ovsdbserver-sb-0" Oct 02 11:10:58 crc kubenswrapper[4783]: I1002 11:10:58.323608 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f0a8aca8-fc00-4c0a-82b5-63fc50672f72-config\") pod \"ovsdbserver-sb-0\" (UID: \"f0a8aca8-fc00-4c0a-82b5-63fc50672f72\") " pod="openstack/ovsdbserver-sb-0" Oct 02 11:10:58 crc kubenswrapper[4783]: I1002 11:10:58.324060 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/f0a8aca8-fc00-4c0a-82b5-63fc50672f72-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"f0a8aca8-fc00-4c0a-82b5-63fc50672f72\") " pod="openstack/ovsdbserver-sb-0" Oct 02 11:10:58 crc kubenswrapper[4783]: I1002 11:10:58.328874 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/f0a8aca8-fc00-4c0a-82b5-63fc50672f72-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"f0a8aca8-fc00-4c0a-82b5-63fc50672f72\") " pod="openstack/ovsdbserver-sb-0" Oct 02 11:10:58 crc kubenswrapper[4783]: I1002 11:10:58.329084 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f0a8aca8-fc00-4c0a-82b5-63fc50672f72-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"f0a8aca8-fc00-4c0a-82b5-63fc50672f72\") " pod="openstack/ovsdbserver-sb-0" Oct 02 11:10:58 crc kubenswrapper[4783]: I1002 11:10:58.334848 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/f0a8aca8-fc00-4c0a-82b5-63fc50672f72-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"f0a8aca8-fc00-4c0a-82b5-63fc50672f72\") " pod="openstack/ovsdbserver-sb-0" Oct 02 11:10:58 crc kubenswrapper[4783]: I1002 11:10:58.339064 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-69vhd\" (UniqueName: \"kubernetes.io/projected/f0a8aca8-fc00-4c0a-82b5-63fc50672f72-kube-api-access-69vhd\") pod \"ovsdbserver-sb-0\" (UID: \"f0a8aca8-fc00-4c0a-82b5-63fc50672f72\") " pod="openstack/ovsdbserver-sb-0" Oct 02 11:10:58 crc kubenswrapper[4783]: I1002 11:10:58.349550 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"ovsdbserver-sb-0\" (UID: \"f0a8aca8-fc00-4c0a-82b5-63fc50672f72\") " pod="openstack/ovsdbserver-sb-0" Oct 02 11:10:58 crc kubenswrapper[4783]: I1002 11:10:58.421128 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Oct 02 11:10:59 crc kubenswrapper[4783]: I1002 11:10:59.009493 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Oct 02 11:10:59 crc kubenswrapper[4783]: I1002 11:10:59.012610 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Oct 02 11:10:59 crc kubenswrapper[4783]: I1002 11:10:59.014834 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Oct 02 11:10:59 crc kubenswrapper[4783]: I1002 11:10:59.015012 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-zn56k" Oct 02 11:10:59 crc kubenswrapper[4783]: I1002 11:10:59.015144 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Oct 02 11:10:59 crc kubenswrapper[4783]: I1002 11:10:59.016045 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Oct 02 11:10:59 crc kubenswrapper[4783]: I1002 11:10:59.017648 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Oct 02 11:10:59 crc kubenswrapper[4783]: I1002 11:10:59.139694 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"ovsdbserver-nb-0\" (UID: \"627209ce-c546-42e5-b35e-ab8abd950ef8\") " pod="openstack/ovsdbserver-nb-0" Oct 02 11:10:59 crc kubenswrapper[4783]: I1002 11:10:59.139762 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/627209ce-c546-42e5-b35e-ab8abd950ef8-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"627209ce-c546-42e5-b35e-ab8abd950ef8\") " pod="openstack/ovsdbserver-nb-0" Oct 02 11:10:59 crc kubenswrapper[4783]: I1002 11:10:59.139784 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xjwf5\" (UniqueName: \"kubernetes.io/projected/627209ce-c546-42e5-b35e-ab8abd950ef8-kube-api-access-xjwf5\") pod \"ovsdbserver-nb-0\" (UID: \"627209ce-c546-42e5-b35e-ab8abd950ef8\") " pod="openstack/ovsdbserver-nb-0" Oct 02 11:10:59 crc kubenswrapper[4783]: I1002 11:10:59.139804 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/627209ce-c546-42e5-b35e-ab8abd950ef8-config\") pod \"ovsdbserver-nb-0\" (UID: \"627209ce-c546-42e5-b35e-ab8abd950ef8\") " pod="openstack/ovsdbserver-nb-0" Oct 02 11:10:59 crc kubenswrapper[4783]: I1002 11:10:59.139885 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/627209ce-c546-42e5-b35e-ab8abd950ef8-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"627209ce-c546-42e5-b35e-ab8abd950ef8\") " pod="openstack/ovsdbserver-nb-0" Oct 02 11:10:59 crc kubenswrapper[4783]: I1002 11:10:59.139953 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/627209ce-c546-42e5-b35e-ab8abd950ef8-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"627209ce-c546-42e5-b35e-ab8abd950ef8\") " pod="openstack/ovsdbserver-nb-0" Oct 02 11:10:59 crc 
kubenswrapper[4783]: I1002 11:10:59.140040 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/627209ce-c546-42e5-b35e-ab8abd950ef8-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"627209ce-c546-42e5-b35e-ab8abd950ef8\") " pod="openstack/ovsdbserver-nb-0" Oct 02 11:10:59 crc kubenswrapper[4783]: I1002 11:10:59.140080 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/627209ce-c546-42e5-b35e-ab8abd950ef8-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"627209ce-c546-42e5-b35e-ab8abd950ef8\") " pod="openstack/ovsdbserver-nb-0" Oct 02 11:10:59 crc kubenswrapper[4783]: I1002 11:10:59.243499 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/627209ce-c546-42e5-b35e-ab8abd950ef8-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"627209ce-c546-42e5-b35e-ab8abd950ef8\") " pod="openstack/ovsdbserver-nb-0" Oct 02 11:10:59 crc kubenswrapper[4783]: I1002 11:10:59.243563 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/627209ce-c546-42e5-b35e-ab8abd950ef8-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"627209ce-c546-42e5-b35e-ab8abd950ef8\") " pod="openstack/ovsdbserver-nb-0" Oct 02 11:10:59 crc kubenswrapper[4783]: I1002 11:10:59.243992 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/627209ce-c546-42e5-b35e-ab8abd950ef8-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"627209ce-c546-42e5-b35e-ab8abd950ef8\") " pod="openstack/ovsdbserver-nb-0" Oct 02 11:10:59 crc kubenswrapper[4783]: I1002 11:10:59.244016 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/627209ce-c546-42e5-b35e-ab8abd950ef8-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"627209ce-c546-42e5-b35e-ab8abd950ef8\") " pod="openstack/ovsdbserver-nb-0" Oct 02 11:10:59 crc kubenswrapper[4783]: I1002 11:10:59.244041 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/627209ce-c546-42e5-b35e-ab8abd950ef8-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"627209ce-c546-42e5-b35e-ab8abd950ef8\") " pod="openstack/ovsdbserver-nb-0" Oct 02 11:10:59 crc kubenswrapper[4783]: I1002 11:10:59.244080 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"ovsdbserver-nb-0\" (UID: \"627209ce-c546-42e5-b35e-ab8abd950ef8\") " pod="openstack/ovsdbserver-nb-0" Oct 02 11:10:59 crc kubenswrapper[4783]: I1002 11:10:59.244221 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/627209ce-c546-42e5-b35e-ab8abd950ef8-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"627209ce-c546-42e5-b35e-ab8abd950ef8\") " pod="openstack/ovsdbserver-nb-0" Oct 02 11:10:59 crc kubenswrapper[4783]: I1002 11:10:59.244357 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xjwf5\" (UniqueName: 
\"kubernetes.io/projected/627209ce-c546-42e5-b35e-ab8abd950ef8-kube-api-access-xjwf5\") pod \"ovsdbserver-nb-0\" (UID: \"627209ce-c546-42e5-b35e-ab8abd950ef8\") " pod="openstack/ovsdbserver-nb-0" Oct 02 11:10:59 crc kubenswrapper[4783]: I1002 11:10:59.244397 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/627209ce-c546-42e5-b35e-ab8abd950ef8-config\") pod \"ovsdbserver-nb-0\" (UID: \"627209ce-c546-42e5-b35e-ab8abd950ef8\") " pod="openstack/ovsdbserver-nb-0" Oct 02 11:10:59 crc kubenswrapper[4783]: I1002 11:10:59.245068 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/627209ce-c546-42e5-b35e-ab8abd950ef8-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"627209ce-c546-42e5-b35e-ab8abd950ef8\") " pod="openstack/ovsdbserver-nb-0" Oct 02 11:10:59 crc kubenswrapper[4783]: I1002 11:10:59.245708 4783 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"ovsdbserver-nb-0\" (UID: \"627209ce-c546-42e5-b35e-ab8abd950ef8\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/ovsdbserver-nb-0" Oct 02 11:10:59 crc kubenswrapper[4783]: I1002 11:10:59.246919 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/627209ce-c546-42e5-b35e-ab8abd950ef8-config\") pod \"ovsdbserver-nb-0\" (UID: \"627209ce-c546-42e5-b35e-ab8abd950ef8\") " pod="openstack/ovsdbserver-nb-0" Oct 02 11:10:59 crc kubenswrapper[4783]: I1002 11:10:59.248843 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/627209ce-c546-42e5-b35e-ab8abd950ef8-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"627209ce-c546-42e5-b35e-ab8abd950ef8\") " pod="openstack/ovsdbserver-nb-0" Oct 02 11:10:59 crc kubenswrapper[4783]: I1002 11:10:59.254222 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/627209ce-c546-42e5-b35e-ab8abd950ef8-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"627209ce-c546-42e5-b35e-ab8abd950ef8\") " pod="openstack/ovsdbserver-nb-0" Oct 02 11:10:59 crc kubenswrapper[4783]: I1002 11:10:59.259370 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/627209ce-c546-42e5-b35e-ab8abd950ef8-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"627209ce-c546-42e5-b35e-ab8abd950ef8\") " pod="openstack/ovsdbserver-nb-0" Oct 02 11:10:59 crc kubenswrapper[4783]: I1002 11:10:59.262099 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xjwf5\" (UniqueName: \"kubernetes.io/projected/627209ce-c546-42e5-b35e-ab8abd950ef8-kube-api-access-xjwf5\") pod \"ovsdbserver-nb-0\" (UID: \"627209ce-c546-42e5-b35e-ab8abd950ef8\") " pod="openstack/ovsdbserver-nb-0" Oct 02 11:10:59 crc kubenswrapper[4783]: I1002 11:10:59.264826 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"ovsdbserver-nb-0\" (UID: \"627209ce-c546-42e5-b35e-ab8abd950ef8\") " pod="openstack/ovsdbserver-nb-0" Oct 02 11:10:59 crc kubenswrapper[4783]: I1002 11:10:59.334938 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Oct 02 11:11:21 crc kubenswrapper[4783]: I1002 11:11:21.513870 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 02 11:11:21 crc kubenswrapper[4783]: I1002 11:11:21.515718 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 02 11:11:22 crc kubenswrapper[4783]: E1002 11:11:22.354197 4783 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Oct 02 11:11:22 crc kubenswrapper[4783]: E1002 11:11:22.354936 4783 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hxxkz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-server-0_openstack(3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Oct 02 11:11:22 crc kubenswrapper[4783]: E1002 11:11:22.356627 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-server-0" podUID="3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c" Oct 02 11:11:22 crc kubenswrapper[4783]: E1002 11:11:22.363153 4783 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified" Oct 02 11:11:22 crc kubenswrapper[4783]: E1002 11:11:22.363262 4783 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-nvdtg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cell1-server-0_openstack(3b3b5c94-1a3b-4486-9247-724deab20d81): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Oct 02 11:11:22 crc kubenswrapper[4783]: E1002 11:11:22.364571 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-cell1-server-0" podUID="3b3b5c94-1a3b-4486-9247-724deab20d81" Oct 02 11:11:22 crc kubenswrapper[4783]: E1002 11:11:22.777820 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-cell1-server-0" podUID="3b3b5c94-1a3b-4486-9247-724deab20d81" Oct 02 11:11:22 crc kubenswrapper[4783]: E1002 11:11:22.778360 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq:current-podified\\\"\"" pod="openstack/rabbitmq-server-0" podUID="3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c" Oct 02 11:11:42 crc kubenswrapper[4783]: E1002 11:11:42.148849 4783 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-memcached:current-podified" Oct 02 11:11:42 crc kubenswrapper[4783]: E1002 11:11:42.149481 4783 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:memcached,Image:quay.io/podified-antelope-centos9/openstack-memcached:current-podified,Command:[/usr/bin/dumb-init -- /usr/local/bin/kolla_start],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:memcached,HostPort:0,ContainerPort:11211,Protocol:TCP,HostIP:,},ContainerPort{Name:memcached-tls,HostPort:0,ContainerPort:11212,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:POD_IPS,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIPs,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:CONFIG_HASH,Value:n5bbh7ch547hbhf8h579h5d5h667h699hfch97hb4h657h75h555h664h6h68ch547hf8h77h5fch54h549hc7h66dh59fh85h88h5c8h565h645q,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/src,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:memcached-tls-certs,ReadOnly:true,MountPath:/var/lib/config-data/tls/certs/memcached.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:memcached-tls-certs,ReadOnly:true,MountPath:/var/lib/config-data/tls/private/memcached.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-t9ps9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 11211 },Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 11211 },Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42457,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42457,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod memcached-0_openstack(f7b5af20-9931-4086-a593-8c0090ce8c12): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Oct 02 11:11:42 crc kubenswrapper[4783]: E1002 11:11:42.150698 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"memcached\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/memcached-0" 
podUID="f7b5af20-9931-4086-a593-8c0090ce8c12" Oct 02 11:11:42 crc kubenswrapper[4783]: E1002 11:11:42.940231 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"memcached\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-memcached:current-podified\\\"\"" pod="openstack/memcached-0" podUID="f7b5af20-9931-4086-a593-8c0090ce8c12" Oct 02 11:11:43 crc kubenswrapper[4783]: E1002 11:11:43.972362 4783 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb:current-podified" Oct 02 11:11:43 crc kubenswrapper[4783]: E1002 11:11:43.973725 4783 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[bash /var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:DB_ROOT_PASSWORD,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:DbRootPassword,Optional:nil,},},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:secrets,ReadOnly:true,MountPath:/var/lib/secrets,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-f4mf7,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-galera-0_openstack(b969c99b-7cd2-413c-b9ea-4b0fc855fb66): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Oct 02 11:11:43 crc kubenswrapper[4783]: E1002 11:11:43.975371 4783 log.go:32] "PullImage from image service failed" err="rpc 
error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb:current-podified"
Oct 02 11:11:43 crc kubenswrapper[4783]: E1002 11:11:43.975389 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-galera-0" podUID="b969c99b-7cd2-413c-b9ea-4b0fc855fb66"
Oct 02 11:11:43 crc kubenswrapper[4783]: E1002 11:11:43.975577 4783 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb:current-podified,Command:[bash /var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:DB_ROOT_PASSWORD,Value:,ValueFrom:&EnvVarSource{FieldRef:nil,ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:&SecretKeySelector{LocalObjectReference:LocalObjectReference{Name:osp-secret,},Key:DbRootPassword,Optional:nil,},},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:secrets,ReadOnly:true,MountPath:/var/lib/secrets,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-67mnv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-cell1-galera-0_openstack(8c174112-c7e3-43b2-b794-a085b9565b90): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Oct 02 11:11:43 crc kubenswrapper[4783]: E1002 11:11:43.977293 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-cell1-galera-0" podUID="8c174112-c7e3-43b2-b794-a085b9565b90"
Oct 02 11:11:44 crc kubenswrapper[4783]: E1002 11:11:44.947094 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\\\"\"" pod="openstack/openstack-galera-0" podUID="b969c99b-7cd2-413c-b9ea-4b0fc855fb66"
Oct 02 11:11:44 crc kubenswrapper[4783]: E1002 11:11:44.947905 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb:current-podified\\\"\"" pod="openstack/openstack-cell1-galera-0" podUID="8c174112-c7e3-43b2-b794-a085b9565b90"
Oct 02 11:11:51 crc kubenswrapper[4783]: I1002 11:11:51.513729 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Oct 02 11:11:51 crc kubenswrapper[4783]: I1002 11:11:51.514557 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 02 11:11:51 crc kubenswrapper[4783]: I1002 11:11:51.514619 4783 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt"
Oct 02 11:11:51 crc kubenswrapper[4783]: I1002 11:11:51.515645 4783 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9eb9fd07e1e6e14820a34c4d9ea92acb1e0177338f9204e820b47da5ec49b7d3"} pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Oct 02 11:11:51 crc kubenswrapper[4783]: I1002 11:11:51.515741 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" containerID="cri-o://9eb9fd07e1e6e14820a34c4d9ea92acb1e0177338f9204e820b47da5ec49b7d3" gracePeriod=600
Oct 02 11:11:53 crc kubenswrapper[4783]: I1002 11:11:53.011973 4783 generic.go:334] "Generic (PLEG): container finished" podID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerID="9eb9fd07e1e6e14820a34c4d9ea92acb1e0177338f9204e820b47da5ec49b7d3" exitCode=0
Oct 02 11:11:53 crc kubenswrapper[4783]: I1002 11:11:53.012055 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" event={"ID":"3288cc82-59a8-408e-8b0e-b5255882b4fb","Type":"ContainerDied","Data":"9eb9fd07e1e6e14820a34c4d9ea92acb1e0177338f9204e820b47da5ec49b7d3"}
Oct 02 11:11:53 crc kubenswrapper[4783]: I1002 11:11:53.012358 4783 scope.go:117] "RemoveContainer" containerID="71430381fae101618ae61e001dc6e16fd7ea79336b50af0d653500a93eaed8e8"
Oct 02 11:11:53 crc kubenswrapper[4783]: I1002 11:11:53.875165 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-k7v6f"]
Oct 02 11:11:54 crc kubenswrapper[4783]: E1002 11:11:54.745891 4783 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified"
Oct 02 11:11:54 crc kubenswrapper[4783]: E1002 11:11:54.746320 4783 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-g9v5g,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-675f4bcbfc-4bkb5_openstack(a8adde51-a6cc-4ca0-ba60-76030ddb125d): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Oct 02 11:11:54 crc kubenswrapper[4783]: E1002 11:11:54.747943 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-675f4bcbfc-4bkb5" podUID="a8adde51-a6cc-4ca0-ba60-76030ddb125d"
Oct 02 11:11:54 crc kubenswrapper[4783]: E1002 11:11:54.982454 4783 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified"
Oct 02 11:11:54 crc kubenswrapper[4783]: E1002 11:11:54.982640 4783 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n659h4h664hbh658h587h67ch89h587h8fh679hc6hf9h55fh644h5d5h698h68dh5cdh5ffh669h54ch9h689hb8hd4h5bfhd8h5d7h5fh665h574q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-f9vtn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-57d769cc4f-tmt9k_openstack(8ed908e0-5c5b-4246-9192-17d4b995650f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Oct 02 11:11:54 crc kubenswrapper[4783]: E1002 11:11:54.983785 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-57d769cc4f-tmt9k" podUID="8ed908e0-5c5b-4246-9192-17d4b995650f"
Oct 02 11:11:54 crc kubenswrapper[4783]: E1002 11:11:54.989488 4783 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified"
Oct 02 11:11:54 crc kubenswrapper[4783]: E1002 11:11:54.989634 4783 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nfdh5dfhb6h64h676hc4h78h97h669h54chfbh696hb5h54bh5d4h6bh64h644h677h584h5cbh698h9dh5bbh5f8h5b8hcdh644h5c7h694hbfh589q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6kn4l,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-5ccc8479f9-6gwjh_openstack(897fe4b3-a755-4281-8f7d-c64d3551f950): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Oct 02 11:11:54 crc kubenswrapper[4783]: E1002 11:11:54.991562 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-5ccc8479f9-6gwjh" podUID="897fe4b3-a755-4281-8f7d-c64d3551f950"
Oct 02 11:11:54 crc kubenswrapper[4783]: W1002 11:11:54.998691 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc9fa791a_e505_43ab_a361_897b21f24f89.slice/crio-91169b4ac3f23972b7a7980c438731fa8474b2443e1d35635177d5d8fcaf5ed6 WatchSource:0}: Error finding container 91169b4ac3f23972b7a7980c438731fa8474b2443e1d35635177d5d8fcaf5ed6: Status 404 returned error can't find the container with id 91169b4ac3f23972b7a7980c438731fa8474b2443e1d35635177d5d8fcaf5ed6
Oct 02 11:11:55 crc kubenswrapper[4783]: I1002 11:11:55.035813 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-k7v6f" event={"ID":"c9fa791a-e505-43ab-a361-897b21f24f89","Type":"ContainerStarted","Data":"91169b4ac3f23972b7a7980c438731fa8474b2443e1d35635177d5d8fcaf5ed6"}
Oct 02 11:11:55 crc kubenswrapper[4783]: E1002 11:11:55.192202 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-57d769cc4f-tmt9k" podUID="8ed908e0-5c5b-4246-9192-17d4b995650f"
Oct 02 11:11:55 crc kubenswrapper[4783]: E1002 11:11:55.192530 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified\\\"\"" pod="openstack/dnsmasq-dns-5ccc8479f9-6gwjh" podUID="897fe4b3-a755-4281-8f7d-c64d3551f950"
Oct 02 11:11:55 crc kubenswrapper[4783]: I1002 11:11:55.281403 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-p5v7m"]
Oct 02 11:11:55 crc kubenswrapper[4783]: I1002 11:11:55.474021 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"]
Oct 02 11:11:55 crc kubenswrapper[4783]: E1002 11:11:55.642054 4783 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified"
Oct 02 11:11:55 crc kubenswrapper[4783]: E1002 11:11:55.642500 4783 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hqg79,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-78dd6ddcc-wg2cc_openstack(b5dacaee-2905-4af8-b754-54fb4d6091f5): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError"
Oct 02 11:11:55 crc kubenswrapper[4783]: E1002 11:11:55.646505 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-78dd6ddcc-wg2cc" podUID="b5dacaee-2905-4af8-b754-54fb4d6091f5"
Oct 02 11:11:55 crc kubenswrapper[4783]: I1002 11:11:55.665716 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-4bkb5"
Oct 02 11:11:55 crc kubenswrapper[4783]: I1002 11:11:55.784853 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g9v5g\" (UniqueName: \"kubernetes.io/projected/a8adde51-a6cc-4ca0-ba60-76030ddb125d-kube-api-access-g9v5g\") pod \"a8adde51-a6cc-4ca0-ba60-76030ddb125d\" (UID: \"a8adde51-a6cc-4ca0-ba60-76030ddb125d\") "
Oct 02 11:11:55 crc kubenswrapper[4783]: I1002 11:11:55.784905 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a8adde51-a6cc-4ca0-ba60-76030ddb125d-config\") pod \"a8adde51-a6cc-4ca0-ba60-76030ddb125d\" (UID: \"a8adde51-a6cc-4ca0-ba60-76030ddb125d\") "
Oct 02 11:11:55 crc kubenswrapper[4783]: I1002 11:11:55.785444 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a8adde51-a6cc-4ca0-ba60-76030ddb125d-config" (OuterVolumeSpecName: "config") pod "a8adde51-a6cc-4ca0-ba60-76030ddb125d" (UID: "a8adde51-a6cc-4ca0-ba60-76030ddb125d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 02 11:11:55 crc kubenswrapper[4783]: I1002 11:11:55.791461 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a8adde51-a6cc-4ca0-ba60-76030ddb125d-kube-api-access-g9v5g" (OuterVolumeSpecName: "kube-api-access-g9v5g") pod "a8adde51-a6cc-4ca0-ba60-76030ddb125d" (UID: "a8adde51-a6cc-4ca0-ba60-76030ddb125d"). InnerVolumeSpecName "kube-api-access-g9v5g". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 02 11:11:55 crc kubenswrapper[4783]: I1002 11:11:55.886635 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g9v5g\" (UniqueName: \"kubernetes.io/projected/a8adde51-a6cc-4ca0-ba60-76030ddb125d-kube-api-access-g9v5g\") on node \"crc\" DevicePath \"\""
Oct 02 11:11:55 crc kubenswrapper[4783]: I1002 11:11:55.886674 4783 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a8adde51-a6cc-4ca0-ba60-76030ddb125d-config\") on node \"crc\" DevicePath \"\""
Oct 02 11:11:56 crc kubenswrapper[4783]: I1002 11:11:56.044053 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-675f4bcbfc-4bkb5"
Oct 02 11:11:56 crc kubenswrapper[4783]: I1002 11:11:56.044045 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-675f4bcbfc-4bkb5" event={"ID":"a8adde51-a6cc-4ca0-ba60-76030ddb125d","Type":"ContainerDied","Data":"07d60e089697e8c038e60f201d3b29f733103a218cdff2d141769bdea69c265f"}
Oct 02 11:11:56 crc kubenswrapper[4783]: I1002 11:11:56.048111 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-p5v7m" event={"ID":"246fea45-d93e-4975-bc2e-818ec7eafa99","Type":"ContainerStarted","Data":"ce664040eccbfd7094c9519791f77224333b3cde9e97dc9089aa6cc8df39efcb"}
Oct 02 11:11:56 crc kubenswrapper[4783]: I1002 11:11:56.120172 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-4bkb5"]
Oct 02 11:11:56 crc kubenswrapper[4783]: I1002 11:11:56.126588 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-675f4bcbfc-4bkb5"]
Oct 02 11:11:56 crc kubenswrapper[4783]: I1002 11:11:56.275362 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"]
Oct 02 11:11:56 crc kubenswrapper[4783]: W1002 11:11:56.616471 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod627209ce_c546_42e5_b35e_ab8abd950ef8.slice/crio-eeaf52421dad4904f20d13a25e1dbd42df12278c7942a2ce575b9e858d1eeead WatchSource:0}: Error finding container eeaf52421dad4904f20d13a25e1dbd42df12278c7942a2ce575b9e858d1eeead: Status 404 returned error can't find the container with id eeaf52421dad4904f20d13a25e1dbd42df12278c7942a2ce575b9e858d1eeead
Oct 02 11:11:56 crc kubenswrapper[4783]: W1002 11:11:56.634979 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf0a8aca8_fc00_4c0a_82b5_63fc50672f72.slice/crio-517271ccd703bacb11c3f40aeceee061e9aecf5bdd803b280c504b502b64a857 WatchSource:0}: Error finding container 517271ccd703bacb11c3f40aeceee061e9aecf5bdd803b280c504b502b64a857: Status 404 returned error can't find the container with id 517271ccd703bacb11c3f40aeceee061e9aecf5bdd803b280c504b502b64a857
Oct 02 11:11:56 crc kubenswrapper[4783]: I1002 11:11:56.679399 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-wg2cc"
Oct 02 11:11:56 crc kubenswrapper[4783]: I1002 11:11:56.799838 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hqg79\" (UniqueName: \"kubernetes.io/projected/b5dacaee-2905-4af8-b754-54fb4d6091f5-kube-api-access-hqg79\") pod \"b5dacaee-2905-4af8-b754-54fb4d6091f5\" (UID: \"b5dacaee-2905-4af8-b754-54fb4d6091f5\") "
Oct 02 11:11:56 crc kubenswrapper[4783]: I1002 11:11:56.799929 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b5dacaee-2905-4af8-b754-54fb4d6091f5-dns-svc\") pod \"b5dacaee-2905-4af8-b754-54fb4d6091f5\" (UID: \"b5dacaee-2905-4af8-b754-54fb4d6091f5\") "
Oct 02 11:11:56 crc kubenswrapper[4783]: I1002 11:11:56.799950 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b5dacaee-2905-4af8-b754-54fb4d6091f5-config\") pod \"b5dacaee-2905-4af8-b754-54fb4d6091f5\" (UID: \"b5dacaee-2905-4af8-b754-54fb4d6091f5\") "
Oct 02 11:11:56 crc kubenswrapper[4783]: I1002 11:11:56.800612 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b5dacaee-2905-4af8-b754-54fb4d6091f5-config" (OuterVolumeSpecName: "config") pod "b5dacaee-2905-4af8-b754-54fb4d6091f5" (UID: "b5dacaee-2905-4af8-b754-54fb4d6091f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 02 11:11:56 crc kubenswrapper[4783]: I1002 11:11:56.800890 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b5dacaee-2905-4af8-b754-54fb4d6091f5-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "b5dacaee-2905-4af8-b754-54fb4d6091f5" (UID: "b5dacaee-2905-4af8-b754-54fb4d6091f5"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 02 11:11:56 crc kubenswrapper[4783]: I1002 11:11:56.804805 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b5dacaee-2905-4af8-b754-54fb4d6091f5-kube-api-access-hqg79" (OuterVolumeSpecName: "kube-api-access-hqg79") pod "b5dacaee-2905-4af8-b754-54fb4d6091f5" (UID: "b5dacaee-2905-4af8-b754-54fb4d6091f5"). InnerVolumeSpecName "kube-api-access-hqg79". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 02 11:11:56 crc kubenswrapper[4783]: I1002 11:11:56.902283 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hqg79\" (UniqueName: \"kubernetes.io/projected/b5dacaee-2905-4af8-b754-54fb4d6091f5-kube-api-access-hqg79\") on node \"crc\" DevicePath \"\""
Oct 02 11:11:56 crc kubenswrapper[4783]: I1002 11:11:56.902323 4783 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b5dacaee-2905-4af8-b754-54fb4d6091f5-dns-svc\") on node \"crc\" DevicePath \"\""
Oct 02 11:11:56 crc kubenswrapper[4783]: I1002 11:11:56.902338 4783 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b5dacaee-2905-4af8-b754-54fb4d6091f5-config\") on node \"crc\" DevicePath \"\""
Oct 02 11:11:57 crc kubenswrapper[4783]: I1002 11:11:57.056338 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"627209ce-c546-42e5-b35e-ab8abd950ef8","Type":"ContainerStarted","Data":"eeaf52421dad4904f20d13a25e1dbd42df12278c7942a2ce575b9e858d1eeead"}
Oct 02 11:11:57 crc kubenswrapper[4783]: I1002 11:11:57.057605 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"f0a8aca8-fc00-4c0a-82b5-63fc50672f72","Type":"ContainerStarted","Data":"517271ccd703bacb11c3f40aeceee061e9aecf5bdd803b280c504b502b64a857"}
Oct 02 11:11:57 crc kubenswrapper[4783]: I1002 11:11:57.059639 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-78dd6ddcc-wg2cc" event={"ID":"b5dacaee-2905-4af8-b754-54fb4d6091f5","Type":"ContainerDied","Data":"1657a553c4498f736397a3a9f56e94d0079ea318693eb658c92514b12d86bc19"}
Oct 02 11:11:57 crc kubenswrapper[4783]: I1002 11:11:57.059769 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-78dd6ddcc-wg2cc"
Oct 02 11:11:57 crc kubenswrapper[4783]: I1002 11:11:57.130159 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-wg2cc"]
Oct 02 11:11:57 crc kubenswrapper[4783]: I1002 11:11:57.136856 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-78dd6ddcc-wg2cc"]
Oct 02 11:11:57 crc kubenswrapper[4783]: I1002 11:11:57.563306 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a8adde51-a6cc-4ca0-ba60-76030ddb125d" path="/var/lib/kubelet/pods/a8adde51-a6cc-4ca0-ba60-76030ddb125d/volumes"
Oct 02 11:11:57 crc kubenswrapper[4783]: I1002 11:11:57.564279 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b5dacaee-2905-4af8-b754-54fb4d6091f5" path="/var/lib/kubelet/pods/b5dacaee-2905-4af8-b754-54fb4d6091f5/volumes"
Oct 02 11:12:00 crc kubenswrapper[4783]: I1002 11:12:00.086801 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" event={"ID":"3288cc82-59a8-408e-8b0e-b5255882b4fb","Type":"ContainerStarted","Data":"d68ca58a875615f7dd80b97789e236029261d5b31a2b176dd22b20723a10f851"}
Oct 02 11:12:10 crc kubenswrapper[4783]: I1002 11:12:10.172048 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"f7b5af20-9931-4086-a593-8c0090ce8c12","Type":"ContainerStarted","Data":"b80c706bb739d4d71c42065b8f15b316e55bcedf978a3696a3b601d67109da33"}
Oct 02 11:12:10 crc kubenswrapper[4783]: I1002 11:12:10.172891 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0"
Oct 02 11:12:10 crc kubenswrapper[4783]: I1002 11:12:10.177733 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"b969c99b-7cd2-413c-b9ea-4b0fc855fb66","Type":"ContainerStarted","Data":"cc4ec896ac609945ed78eacca2e8864dfcd26f8ed7cedf4f89f125dfde09fad3"}
Oct 02 11:12:10 crc kubenswrapper[4783]: I1002 11:12:10.204869 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=3.346901577 podStartE2EDuration="1m20.204841888s" podCreationTimestamp="2025-10-02 11:10:50 +0000 UTC" firstStartedPulling="2025-10-02 11:10:51.505124681 +0000 UTC m=+1084.821318942" lastFinishedPulling="2025-10-02 11:12:08.363064992 +0000 UTC m=+1161.679259253" observedRunningTime="2025-10-02 11:12:10.198882036 +0000 UTC m=+1163.515076297" watchObservedRunningTime="2025-10-02 11:12:10.204841888 +0000 UTC m=+1163.521036189"
Oct 02 11:12:11 crc kubenswrapper[4783]: I1002 11:12:11.189037 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"9d4e10e2-7f87-4ffc-9120-fa41d978cb4f","Type":"ContainerStarted","Data":"5519e8bf7ecbf73cd0360fa9494c1cdc2f27d3941645ec0fe3dd75922d9805d0"}
Oct 02 11:12:11 crc kubenswrapper[4783]: I1002 11:12:11.190677 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0"
Oct 02 11:12:11 crc kubenswrapper[4783]: I1002 11:12:11.192495 4783 generic.go:334] "Generic (PLEG): container finished" podID="8ed908e0-5c5b-4246-9192-17d4b995650f" containerID="4d0c4dbce27e8dcf5e2e98a9b41b49b3271b2be9c76353bc30a970257dda91aa" exitCode=0
Oct 02 11:12:11 crc kubenswrapper[4783]: I1002 11:12:11.192566 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-tmt9k" event={"ID":"8ed908e0-5c5b-4246-9192-17d4b995650f","Type":"ContainerDied","Data":"4d0c4dbce27e8dcf5e2e98a9b41b49b3271b2be9c76353bc30a970257dda91aa"}
Oct 02 11:12:11 crc kubenswrapper[4783]: I1002 11:12:11.194541 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c","Type":"ContainerStarted","Data":"aab927f4dae4ca46598a4e07540a8b726e4cb9bda96cba6d1674271516631011"}
Oct 02 11:12:11 crc kubenswrapper[4783]: I1002 11:12:11.198175 4783 generic.go:334] "Generic (PLEG): container finished" podID="897fe4b3-a755-4281-8f7d-c64d3551f950" containerID="2880c347084ccb3fd81d81004bb9fe5593eff10192157cca84d8c357a8841a49" exitCode=0
Oct 02 11:12:11 crc kubenswrapper[4783]: I1002 11:12:11.198222 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc8479f9-6gwjh" event={"ID":"897fe4b3-a755-4281-8f7d-c64d3551f950","Type":"ContainerDied","Data":"2880c347084ccb3fd81d81004bb9fe5593eff10192157cca84d8c357a8841a49"}
Oct 02 11:12:11 crc kubenswrapper[4783]: I1002 11:12:11.204795 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-k7v6f" event={"ID":"c9fa791a-e505-43ab-a361-897b21f24f89","Type":"ContainerStarted","Data":"beeef7b4ca76eeb7d7fe631ce9938e54940681de26dbbd8b527e12d96cb3c7b1"}
Oct 02 11:12:11 crc kubenswrapper[4783]: I1002 11:12:11.205342 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-k7v6f"
Oct 02 11:12:11 crc kubenswrapper[4783]: I1002 11:12:11.207393 4783 generic.go:334] "Generic (PLEG): container finished" podID="246fea45-d93e-4975-bc2e-818ec7eafa99" containerID="b37f251be6c113d65ac868f673aee462c2e2b112b1e268274f42e5b5f6b01361" exitCode=0
Oct 02 11:12:11 crc kubenswrapper[4783]: I1002 11:12:11.207462 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-p5v7m" event={"ID":"246fea45-d93e-4975-bc2e-818ec7eafa99","Type":"ContainerDied","Data":"b37f251be6c113d65ac868f673aee462c2e2b112b1e268274f42e5b5f6b01361"}
Oct 02 11:12:11 crc kubenswrapper[4783]: I1002 11:12:11.210431 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"3b3b5c94-1a3b-4486-9247-724deab20d81","Type":"ContainerStarted","Data":"4ad86dd5b5b6bbeabc1223e6afe4135532d1280634ed21d294d8ba134487df58"}
Oct 02 11:12:11 crc kubenswrapper[4783]: I1002 11:12:11.212736 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"627209ce-c546-42e5-b35e-ab8abd950ef8","Type":"ContainerStarted","Data":"378b3013f39c9826cfc7d5d76e998b82e6a608b487eedd10ae86afd6dbfa2b39"}
Oct 02 11:12:11 crc kubenswrapper[4783]: I1002 11:12:11.213854 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"f0a8aca8-fc00-4c0a-82b5-63fc50672f72","Type":"ContainerStarted","Data":"2b95bf2938460677e86cb44c0bd74bc33d3b23115acf505c416c32c578108c53"}
Oct 02 11:12:11 crc kubenswrapper[4783]: I1002 11:12:11.215407 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"8c174112-c7e3-43b2-b794-a085b9565b90","Type":"ContainerStarted","Data":"651f62e8a2e598e5c700f4817aa201515dc4543572d2e4f4cacb02ec7e4d151a"}
Oct 02 11:12:11 crc kubenswrapper[4783]: I1002 11:12:11.252837 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.934135038 podStartE2EDuration="1m19.252813841s" podCreationTimestamp="2025-10-02 11:10:52 +0000 UTC" firstStartedPulling="2025-10-02 11:10:53.278114246 +0000 UTC m=+1086.594308507" lastFinishedPulling="2025-10-02 11:12:09.596793049 +0000 UTC m=+1162.912987310" observedRunningTime="2025-10-02 11:12:11.235942842 +0000 UTC m=+1164.552137103" watchObservedRunningTime="2025-10-02 11:12:11.252813841 +0000 UTC m=+1164.569008102"
Oct 02 11:12:11 crc kubenswrapper[4783]: I1002 11:12:11.312755 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-k7v6f" podStartSLOduration=59.91866981 podStartE2EDuration="1m14.312738538s" podCreationTimestamp="2025-10-02 11:10:57 +0000 UTC" firstStartedPulling="2025-10-02 11:11:55.192567215 +0000 UTC m=+1148.508761476" lastFinishedPulling="2025-10-02 11:12:09.586635933 +0000 UTC m=+1162.902830204" observedRunningTime="2025-10-02 11:12:11.309163111 +0000 UTC m=+1164.625357372" watchObservedRunningTime="2025-10-02 11:12:11.312738538 +0000 UTC m=+1164.628932789"
Oct 02 11:12:12 crc kubenswrapper[4783]: I1002 11:12:12.226934 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-p5v7m" event={"ID":"246fea45-d93e-4975-bc2e-818ec7eafa99","Type":"ContainerStarted","Data":"d02640761d5f9f08480a3842927a9984d101cd9ce4ddf31e47e3d48ca9938134"}
Oct 02 11:12:12 crc kubenswrapper[4783]: I1002 11:12:12.227211 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-p5v7m"
Oct 02 11:12:12 crc kubenswrapper[4783]: I1002 11:12:12.227434 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-p5v7m"
Oct 02 11:12:12 crc kubenswrapper[4783]: I1002 11:12:12.227474 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-p5v7m" event={"ID":"246fea45-d93e-4975-bc2e-818ec7eafa99","Type":"ContainerStarted","Data":"06c9505aab98efe890ac387aca753bda19b5196c10c4483dc829e6db52fb29cc"}
Oct 02 11:12:12 crc kubenswrapper[4783]: I1002 11:12:12.230601 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-tmt9k" event={"ID":"8ed908e0-5c5b-4246-9192-17d4b995650f","Type":"ContainerStarted","Data":"5df9875dd2aaab63ea083b19c3a6086fa9c3fb0a508e57aeac7e6d9f9f05fce3"}
Oct 02 11:12:12 crc kubenswrapper[4783]: I1002 11:12:12.230778 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-57d769cc4f-tmt9k"
Oct 02 11:12:12 crc kubenswrapper[4783]: I1002 11:12:12.252242 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc8479f9-6gwjh" event={"ID":"897fe4b3-a755-4281-8f7d-c64d3551f950","Type":"ContainerStarted","Data":"75c7db1e94be9b5b9045a63bf0e0402deb11d072b8a3ff603a8a0df389be04f8"}
Oct 02 11:12:12 crc kubenswrapper[4783]: I1002 11:12:12.252486 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5ccc8479f9-6gwjh"
Oct 02 11:12:12 crc kubenswrapper[4783]: I1002 11:12:12.256678 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-p5v7m" podStartSLOduration=61.304836235 podStartE2EDuration="1m15.256659756s" podCreationTimestamp="2025-10-02 11:10:57 +0000 UTC" firstStartedPulling="2025-10-02 11:11:55.600338506 +0000 UTC m=+1148.916532777" lastFinishedPulling="2025-10-02 11:12:09.552162027 +0000 UTC m=+1162.868356298" observedRunningTime="2025-10-02 11:12:12.25129566 +0000 UTC m=+1165.567489941" watchObservedRunningTime="2025-10-02 11:12:12.256659756 +0000 UTC m=+1165.572854017"
Oct 02 11:12:12 crc kubenswrapper[4783]: I1002 11:12:12.270284 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-57d769cc4f-tmt9k" podStartSLOduration=-9223371950.584509 podStartE2EDuration="1m26.270266855s" podCreationTimestamp="2025-10-02 11:10:46 +0000 UTC" firstStartedPulling="2025-10-02 11:10:48.11961378 +0000 UTC m=+1081.435808041" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:12:12.266283727 +0000 UTC m=+1165.582477988" watchObservedRunningTime="2025-10-02 11:12:12.270266855 +0000 UTC m=+1165.586461116"
Oct 02 11:12:12 crc kubenswrapper[4783]: I1002 11:12:12.288785 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5ccc8479f9-6gwjh" podStartSLOduration=3.6312735419999997 podStartE2EDuration="1m26.288768598s" podCreationTimestamp="2025-10-02 11:10:46 +0000 UTC" firstStartedPulling="2025-10-02 11:10:47.639899285 +0000 UTC m=+1080.956093546" lastFinishedPulling="2025-10-02 11:12:10.297394311 +0000 UTC m=+1163.613588602" observedRunningTime="2025-10-02 11:12:12.28002734 +0000 UTC m=+1165.596221601" watchObservedRunningTime="2025-10-02 11:12:12.288768598 +0000 UTC m=+1165.604962859"
Oct 02 11:12:15 crc kubenswrapper[4783]: I1002 11:12:15.522163 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-pc22j"]
Oct 02 11:12:15 crc kubenswrapper[4783]: I1002 11:12:15.523736 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-pc22j"
Oct 02 11:12:15 crc kubenswrapper[4783]: I1002 11:12:15.527520 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config"
Oct 02 11:12:15 crc kubenswrapper[4783]: I1002 11:12:15.555108 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-pc22j"]
Oct 02 11:12:15 crc kubenswrapper[4783]: I1002 11:12:15.636579 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/8040f1be-17e4-4424-942a-b7c2bcb55b0b-ovs-rundir\") pod \"ovn-controller-metrics-pc22j\" (UID: \"8040f1be-17e4-4424-942a-b7c2bcb55b0b\") " pod="openstack/ovn-controller-metrics-pc22j"
Oct 02 11:12:15 crc kubenswrapper[4783]: I1002 11:12:15.636638 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8040f1be-17e4-4424-942a-b7c2bcb55b0b-config\") pod \"ovn-controller-metrics-pc22j\" (UID: \"8040f1be-17e4-4424-942a-b7c2bcb55b0b\") " pod="openstack/ovn-controller-metrics-pc22j"
Oct 02 11:12:15 crc kubenswrapper[4783]: I1002 11:12:15.636742 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/8040f1be-17e4-4424-942a-b7c2bcb55b0b-ovn-rundir\") pod \"ovn-controller-metrics-pc22j\" (UID: \"8040f1be-17e4-4424-942a-b7c2bcb55b0b\") " pod="openstack/ovn-controller-metrics-pc22j"
Oct 02 11:12:15 crc kubenswrapper[4783]: I1002 11:12:15.636775 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8040f1be-17e4-4424-942a-b7c2bcb55b0b-combined-ca-bundle\") pod \"ovn-controller-metrics-pc22j\" (UID: \"8040f1be-17e4-4424-942a-b7c2bcb55b0b\") " pod="openstack/ovn-controller-metrics-pc22j"
Oct 02 11:12:15 crc kubenswrapper[4783]: I1002 11:12:15.636817 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/8040f1be-17e4-4424-942a-b7c2bcb55b0b-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-pc22j\" (UID: \"8040f1be-17e4-4424-942a-b7c2bcb55b0b\") " pod="openstack/ovn-controller-metrics-pc22j"
Oct 02 11:12:15 crc kubenswrapper[4783]: I1002 11:12:15.636845 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vcs4n\" (UniqueName: \"kubernetes.io/projected/8040f1be-17e4-4424-942a-b7c2bcb55b0b-kube-api-access-vcs4n\") pod \"ovn-controller-metrics-pc22j\" (UID: \"8040f1be-17e4-4424-942a-b7c2bcb55b0b\") " pod="openstack/ovn-controller-metrics-pc22j"
Oct 02 11:12:15 crc kubenswrapper[4783]: I1002 11:12:15.701217 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-tmt9k"]
Oct 02 11:12:15 crc kubenswrapper[4783]: I1002 11:12:15.701459 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-57d769cc4f-tmt9k" podUID="8ed908e0-5c5b-4246-9192-17d4b995650f" containerName="dnsmasq-dns" containerID="cri-o://5df9875dd2aaab63ea083b19c3a6086fa9c3fb0a508e57aeac7e6d9f9f05fce3" gracePeriod=10
Oct 02 11:12:15 crc kubenswrapper[4783]: I1002 11:12:15.738291 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/8040f1be-17e4-4424-942a-b7c2bcb55b0b-ovs-rundir\") pod \"ovn-controller-metrics-pc22j\" (UID: \"8040f1be-17e4-4424-942a-b7c2bcb55b0b\") " pod="openstack/ovn-controller-metrics-pc22j"
Oct 02 11:12:15 crc kubenswrapper[4783]: I1002 11:12:15.738346 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8040f1be-17e4-4424-942a-b7c2bcb55b0b-config\") pod \"ovn-controller-metrics-pc22j\" (UID: \"8040f1be-17e4-4424-942a-b7c2bcb55b0b\") " pod="openstack/ovn-controller-metrics-pc22j"
Oct 02 11:12:15 crc kubenswrapper[4783]: I1002 11:12:15.738484 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/8040f1be-17e4-4424-942a-b7c2bcb55b0b-ovn-rundir\") pod \"ovn-controller-metrics-pc22j\" (UID: \"8040f1be-17e4-4424-942a-b7c2bcb55b0b\") " pod="openstack/ovn-controller-metrics-pc22j"
Oct 02 11:12:15 crc kubenswrapper[4783]: I1002 11:12:15.738520 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8040f1be-17e4-4424-942a-b7c2bcb55b0b-combined-ca-bundle\") pod \"ovn-controller-metrics-pc22j\" (UID: \"8040f1be-17e4-4424-942a-b7c2bcb55b0b\") " pod="openstack/ovn-controller-metrics-pc22j"
Oct 02 11:12:15 crc kubenswrapper[4783]: I1002 11:12:15.738561 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/8040f1be-17e4-4424-942a-b7c2bcb55b0b-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-pc22j\" (UID: \"8040f1be-17e4-4424-942a-b7c2bcb55b0b\") " pod="openstack/ovn-controller-metrics-pc22j"
Oct 02 11:12:15 crc kubenswrapper[4783]: I1002 11:12:15.738590 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vcs4n\" (UniqueName: \"kubernetes.io/projected/8040f1be-17e4-4424-942a-b7c2bcb55b0b-kube-api-access-vcs4n\") pod \"ovn-controller-metrics-pc22j\" (UID: \"8040f1be-17e4-4424-942a-b7c2bcb55b0b\") " pod="openstack/ovn-controller-metrics-pc22j"
Oct 02 11:12:15 crc kubenswrapper[4783]: I1002 11:12:15.738760 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/8040f1be-17e4-4424-942a-b7c2bcb55b0b-ovn-rundir\") pod \"ovn-controller-metrics-pc22j\" (UID: \"8040f1be-17e4-4424-942a-b7c2bcb55b0b\") " pod="openstack/ovn-controller-metrics-pc22j"
Oct 02 11:12:15 crc kubenswrapper[4783]: I1002 11:12:15.738861 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/8040f1be-17e4-4424-942a-b7c2bcb55b0b-ovs-rundir\") pod \"ovn-controller-metrics-pc22j\" (UID: \"8040f1be-17e4-4424-942a-b7c2bcb55b0b\") " pod="openstack/ovn-controller-metrics-pc22j"
Oct 02 11:12:15 crc kubenswrapper[4783]: I1002 11:12:15.739310 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8040f1be-17e4-4424-942a-b7c2bcb55b0b-config\") pod \"ovn-controller-metrics-pc22j\" (UID: \"8040f1be-17e4-4424-942a-b7c2bcb55b0b\") " pod="openstack/ovn-controller-metrics-pc22j"
Oct 02 11:12:15 crc kubenswrapper[4783]: I1002 11:12:15.748114 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/8040f1be-17e4-4424-942a-b7c2bcb55b0b-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-pc22j\" (UID: \"8040f1be-17e4-4424-942a-b7c2bcb55b0b\") " pod="openstack/ovn-controller-metrics-pc22j"
Oct 02 11:12:15 crc kubenswrapper[4783]: I1002 11:12:15.749047 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8040f1be-17e4-4424-942a-b7c2bcb55b0b-combined-ca-bundle\") pod \"ovn-controller-metrics-pc22j\" (UID: \"8040f1be-17e4-4424-942a-b7c2bcb55b0b\") " pod="openstack/ovn-controller-metrics-pc22j"
Oct 02 11:12:15 crc kubenswrapper[4783]: I1002 11:12:15.755414 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7f896c8c65-l4t57"]
Oct 02 11:12:15 crc kubenswrapper[4783]: I1002 11:12:15.757050 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7f896c8c65-l4t57"
Oct 02 11:12:15 crc kubenswrapper[4783]: I1002 11:12:15.777396 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vcs4n\" (UniqueName: \"kubernetes.io/projected/8040f1be-17e4-4424-942a-b7c2bcb55b0b-kube-api-access-vcs4n\") pod \"ovn-controller-metrics-pc22j\" (UID: \"8040f1be-17e4-4424-942a-b7c2bcb55b0b\") " pod="openstack/ovn-controller-metrics-pc22j"
Oct 02 11:12:15 crc kubenswrapper[4783]: I1002 11:12:15.790576 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb"
Oct 02 11:12:15 crc kubenswrapper[4783]: I1002 11:12:15.806910 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7f896c8c65-l4t57"]
Oct 02 11:12:15 crc kubenswrapper[4783]: I1002 11:12:15.839895 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-24gtc\" (UniqueName: \"kubernetes.io/projected/7d318b7e-ab49-4930-b4d0-49a84ea8a71a-kube-api-access-24gtc\") pod \"dnsmasq-dns-7f896c8c65-l4t57\" (UID: \"7d318b7e-ab49-4930-b4d0-49a84ea8a71a\") " pod="openstack/dnsmasq-dns-7f896c8c65-l4t57"
Oct 02 11:12:15 crc kubenswrapper[4783]: I1002 11:12:15.839986 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7d318b7e-ab49-4930-b4d0-49a84ea8a71a-ovsdbserver-sb\") pod \"dnsmasq-dns-7f896c8c65-l4t57\" (UID: \"7d318b7e-ab49-4930-b4d0-49a84ea8a71a\") " pod="openstack/dnsmasq-dns-7f896c8c65-l4t57"
Oct 02 11:12:15 crc kubenswrapper[4783]: I1002 11:12:15.840022 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7d318b7e-ab49-4930-b4d0-49a84ea8a71a-dns-svc\") pod \"dnsmasq-dns-7f896c8c65-l4t57\" (UID: \"7d318b7e-ab49-4930-b4d0-49a84ea8a71a\") " pod="openstack/dnsmasq-dns-7f896c8c65-l4t57"
Oct 02 11:12:15 crc kubenswrapper[4783]: I1002 11:12:15.840065 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d318b7e-ab49-4930-b4d0-49a84ea8a71a-config\") pod \"dnsmasq-dns-7f896c8c65-l4t57\" (UID: \"7d318b7e-ab49-4930-b4d0-49a84ea8a71a\") " pod="openstack/dnsmasq-dns-7f896c8c65-l4t57"
Oct 02 11:12:15 crc kubenswrapper[4783]: I1002 11:12:15.845606 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-pc22j"
Oct 02 11:12:15 crc kubenswrapper[4783]: I1002 11:12:15.945134 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-24gtc\" (UniqueName: \"kubernetes.io/projected/7d318b7e-ab49-4930-b4d0-49a84ea8a71a-kube-api-access-24gtc\") pod \"dnsmasq-dns-7f896c8c65-l4t57\" (UID: \"7d318b7e-ab49-4930-b4d0-49a84ea8a71a\") " pod="openstack/dnsmasq-dns-7f896c8c65-l4t57"
Oct 02 11:12:15 crc kubenswrapper[4783]: I1002 11:12:15.945271 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7d318b7e-ab49-4930-b4d0-49a84ea8a71a-ovsdbserver-sb\") pod \"dnsmasq-dns-7f896c8c65-l4t57\" (UID: \"7d318b7e-ab49-4930-b4d0-49a84ea8a71a\") " pod="openstack/dnsmasq-dns-7f896c8c65-l4t57"
Oct 02 11:12:15 crc kubenswrapper[4783]: I1002 11:12:15.945311 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7d318b7e-ab49-4930-b4d0-49a84ea8a71a-dns-svc\") pod \"dnsmasq-dns-7f896c8c65-l4t57\" (UID: \"7d318b7e-ab49-4930-b4d0-49a84ea8a71a\") " pod="openstack/dnsmasq-dns-7f896c8c65-l4t57"
Oct 02 11:12:15 crc kubenswrapper[4783]: I1002 11:12:15.945366 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d318b7e-ab49-4930-b4d0-49a84ea8a71a-config\") pod \"dnsmasq-dns-7f896c8c65-l4t57\" (UID: \"7d318b7e-ab49-4930-b4d0-49a84ea8a71a\") " pod="openstack/dnsmasq-dns-7f896c8c65-l4t57"
Oct 02 11:12:15 crc kubenswrapper[4783]: I1002 11:12:15.946573 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7d318b7e-ab49-4930-b4d0-49a84ea8a71a-dns-svc\") pod \"dnsmasq-dns-7f896c8c65-l4t57\" (UID: \"7d318b7e-ab49-4930-b4d0-49a84ea8a71a\") " pod="openstack/dnsmasq-dns-7f896c8c65-l4t57"
Oct 02 11:12:15 crc kubenswrapper[4783]: I1002 11:12:15.957541 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d318b7e-ab49-4930-b4d0-49a84ea8a71a-config\") pod \"dnsmasq-dns-7f896c8c65-l4t57\" (UID: \"7d318b7e-ab49-4930-b4d0-49a84ea8a71a\") " pod="openstack/dnsmasq-dns-7f896c8c65-l4t57"
Oct 02 11:12:15 crc kubenswrapper[4783]: I1002 11:12:15.958372 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7d318b7e-ab49-4930-b4d0-49a84ea8a71a-ovsdbserver-sb\") pod \"dnsmasq-dns-7f896c8c65-l4t57\" (UID: \"7d318b7e-ab49-4930-b4d0-49a84ea8a71a\") " pod="openstack/dnsmasq-dns-7f896c8c65-l4t57"
Oct 02 11:12:15 crc kubenswrapper[4783]: I1002 11:12:15.978861 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0"
Oct 02 11:12:15 crc kubenswrapper[4783]: I1002 11:12:15.982268 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-6gwjh"]
Oct 02 11:12:15 crc kubenswrapper[4783]: I1002 11:12:15.985644 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5ccc8479f9-6gwjh" podUID="897fe4b3-a755-4281-8f7d-c64d3551f950" containerName="dnsmasq-dns" containerID="cri-o://75c7db1e94be9b5b9045a63bf0e0402deb11d072b8a3ff603a8a0df389be04f8" gracePeriod=10
Oct 02 11:12:16 crc kubenswrapper[4783]: I1002 11:12:16.022548 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-24gtc\" (UniqueName: \"kubernetes.io/projected/7d318b7e-ab49-4930-b4d0-49a84ea8a71a-kube-api-access-24gtc\") pod \"dnsmasq-dns-7f896c8c65-l4t57\" (UID: \"7d318b7e-ab49-4930-b4d0-49a84ea8a71a\") " pod="openstack/dnsmasq-dns-7f896c8c65-l4t57"
Oct 02 11:12:16 crc kubenswrapper[4783]: I1002 11:12:16.065023 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-r7hrq"]
Oct 02 11:12:16 crc kubenswrapper[4783]: I1002 11:12:16.066602 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-r7hrq"
Oct 02 11:12:16 crc kubenswrapper[4783]: I1002 11:12:16.074964 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb"
Oct 02 11:12:16 crc kubenswrapper[4783]: I1002 11:12:16.084864 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-r7hrq"]
Oct 02 11:12:16 crc kubenswrapper[4783]: I1002 11:12:16.138296 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7f896c8c65-l4t57"
Oct 02 11:12:16 crc kubenswrapper[4783]: I1002 11:12:16.149248 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/eaf0e1d9-d343-4049-9725-a689da14aaa8-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-r7hrq\" (UID: \"eaf0e1d9-d343-4049-9725-a689da14aaa8\") " pod="openstack/dnsmasq-dns-86db49b7ff-r7hrq"
Oct 02 11:12:16 crc kubenswrapper[4783]: I1002 11:12:16.149334 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/eaf0e1d9-d343-4049-9725-a689da14aaa8-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-r7hrq\" (UID: \"eaf0e1d9-d343-4049-9725-a689da14aaa8\") " pod="openstack/dnsmasq-dns-86db49b7ff-r7hrq"
Oct 02 11:12:16 crc kubenswrapper[4783]: I1002 11:12:16.149386 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eaf0e1d9-d343-4049-9725-a689da14aaa8-config\") pod \"dnsmasq-dns-86db49b7ff-r7hrq\" (UID: \"eaf0e1d9-d343-4049-9725-a689da14aaa8\") " pod="openstack/dnsmasq-dns-86db49b7ff-r7hrq"
Oct 02 11:12:16 crc kubenswrapper[4783]: I1002 11:12:16.149422 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/eaf0e1d9-d343-4049-9725-a689da14aaa8-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-r7hrq\" (UID: \"eaf0e1d9-d343-4049-9725-a689da14aaa8\") " pod="openstack/dnsmasq-dns-86db49b7ff-r7hrq"
Oct 02 11:12:16 crc kubenswrapper[4783]: I1002 11:12:16.149456 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nx2pm\" (UniqueName: \"kubernetes.io/projected/eaf0e1d9-d343-4049-9725-a689da14aaa8-kube-api-access-nx2pm\") pod \"dnsmasq-dns-86db49b7ff-r7hrq\" (UID: \"eaf0e1d9-d343-4049-9725-a689da14aaa8\") " pod="openstack/dnsmasq-dns-86db49b7ff-r7hrq"
Oct 02 11:12:16 crc kubenswrapper[4783]: I1002 11:12:16.250330 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eaf0e1d9-d343-4049-9725-a689da14aaa8-config\") pod \"dnsmasq-dns-86db49b7ff-r7hrq\" (UID: \"eaf0e1d9-d343-4049-9725-a689da14aaa8\") " pod="openstack/dnsmasq-dns-86db49b7ff-r7hrq"
Oct 02 11:12:16 crc kubenswrapper[4783]: I1002 11:12:16.250403 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/eaf0e1d9-d343-4049-9725-a689da14aaa8-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-r7hrq\" (UID: \"eaf0e1d9-d343-4049-9725-a689da14aaa8\") " pod="openstack/dnsmasq-dns-86db49b7ff-r7hrq"
Oct 02 11:12:16 crc kubenswrapper[4783]: I1002 11:12:16.250426 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nx2pm\" (UniqueName: \"kubernetes.io/projected/eaf0e1d9-d343-4049-9725-a689da14aaa8-kube-api-access-nx2pm\") pod \"dnsmasq-dns-86db49b7ff-r7hrq\" (UID: \"eaf0e1d9-d343-4049-9725-a689da14aaa8\") " pod="openstack/dnsmasq-dns-86db49b7ff-r7hrq"
Oct 02 11:12:16 crc kubenswrapper[4783]: I1002 11:12:16.250492 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/eaf0e1d9-d343-4049-9725-a689da14aaa8-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-r7hrq\" (UID: \"eaf0e1d9-d343-4049-9725-a689da14aaa8\") " pod="openstack/dnsmasq-dns-86db49b7ff-r7hrq"
Oct 02 11:12:16 crc kubenswrapper[4783]: I1002 11:12:16.250529 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/eaf0e1d9-d343-4049-9725-a689da14aaa8-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-r7hrq\" (UID: \"eaf0e1d9-d343-4049-9725-a689da14aaa8\") " pod="openstack/dnsmasq-dns-86db49b7ff-r7hrq"
Oct 02 11:12:16 crc kubenswrapper[4783]: I1002 11:12:16.251228 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eaf0e1d9-d343-4049-9725-a689da14aaa8-config\") pod \"dnsmasq-dns-86db49b7ff-r7hrq\" (UID: \"eaf0e1d9-d343-4049-9725-a689da14aaa8\") " pod="openstack/dnsmasq-dns-86db49b7ff-r7hrq"
Oct 02 11:12:16 crc kubenswrapper[4783]: I1002 11:12:16.251269 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/eaf0e1d9-d343-4049-9725-a689da14aaa8-ovsdbserver-nb\") pod \"dnsmasq-dns-86db49b7ff-r7hrq\" (UID: \"eaf0e1d9-d343-4049-9725-a689da14aaa8\") " pod="openstack/dnsmasq-dns-86db49b7ff-r7hrq"
Oct 02 11:12:16 crc kubenswrapper[4783]: I1002 11:12:16.251849 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/eaf0e1d9-d343-4049-9725-a689da14aaa8-dns-svc\") pod \"dnsmasq-dns-86db49b7ff-r7hrq\" (UID: \"eaf0e1d9-d343-4049-9725-a689da14aaa8\") " pod="openstack/dnsmasq-dns-86db49b7ff-r7hrq"
Oct 02 11:12:16 crc kubenswrapper[4783]: I1002 11:12:16.251994 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/eaf0e1d9-d343-4049-9725-a689da14aaa8-ovsdbserver-sb\") pod \"dnsmasq-dns-86db49b7ff-r7hrq\" (UID: \"eaf0e1d9-d343-4049-9725-a689da14aaa8\") " pod="openstack/dnsmasq-dns-86db49b7ff-r7hrq"
Oct 02 11:12:16 crc kubenswrapper[4783]: I1002 11:12:16.280198 4783 generic.go:334] "Generic (PLEG): container finished" podID="897fe4b3-a755-4281-8f7d-c64d3551f950" containerID="75c7db1e94be9b5b9045a63bf0e0402deb11d072b8a3ff603a8a0df389be04f8" exitCode=0
Oct 02 11:12:16 crc kubenswrapper[4783]: I1002 11:12:16.280241 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc8479f9-6gwjh" event={"ID":"897fe4b3-a755-4281-8f7d-c64d3551f950","Type":"ContainerDied","Data":"75c7db1e94be9b5b9045a63bf0e0402deb11d072b8a3ff603a8a0df389be04f8"}
Oct 02 11:12:16 crc kubenswrapper[4783]: I1002 11:12:16.281621 4783 generic.go:334] "Generic (PLEG): container finished" podID="8ed908e0-5c5b-4246-9192-17d4b995650f" containerID="5df9875dd2aaab63ea083b19c3a6086fa9c3fb0a508e57aeac7e6d9f9f05fce3" exitCode=0
Oct 02 11:12:16 crc kubenswrapper[4783]: I1002 11:12:16.281643 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-tmt9k" event={"ID":"8ed908e0-5c5b-4246-9192-17d4b995650f","Type":"ContainerDied","Data":"5df9875dd2aaab63ea083b19c3a6086fa9c3fb0a508e57aeac7e6d9f9f05fce3"}
Oct 02 11:12:16 crc kubenswrapper[4783]: I1002 11:12:16.286973 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nx2pm\" (UniqueName: \"kubernetes.io/projected/eaf0e1d9-d343-4049-9725-a689da14aaa8-kube-api-access-nx2pm\") pod \"dnsmasq-dns-86db49b7ff-r7hrq\" (UID: \"eaf0e1d9-d343-4049-9725-a689da14aaa8\") " pod="openstack/dnsmasq-dns-86db49b7ff-r7hrq"
Oct 02 11:12:16 crc kubenswrapper[4783]: I1002 11:12:16.405230 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-r7hrq"
Oct 02 11:12:16 crc kubenswrapper[4783]: I1002 11:12:16.487699 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5ccc8479f9-6gwjh" podUID="897fe4b3-a755-4281-8f7d-c64d3551f950" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.96:5353: connect: connection refused"
Oct 02 11:12:17 crc kubenswrapper[4783]: I1002 11:12:17.114919 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-57d769cc4f-tmt9k" podUID="8ed908e0-5c5b-4246-9192-17d4b995650f" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.97:5353: connect: connection refused"
Oct 02 11:12:21 crc kubenswrapper[4783]: I1002 11:12:21.485853 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5ccc8479f9-6gwjh" podUID="897fe4b3-a755-4281-8f7d-c64d3551f950" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.96:5353: connect: connection refused"
Oct 02 11:12:22 crc kubenswrapper[4783]: I1002 11:12:22.113267 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-57d769cc4f-tmt9k" podUID="8ed908e0-5c5b-4246-9192-17d4b995650f" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.97:5353: connect: connection refused"
Oct 02 11:12:22 crc kubenswrapper[4783]: I1002 11:12:22.650700 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0"
Oct 02 11:12:22 crc kubenswrapper[4783]: I1002 11:12:22.671186 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7f896c8c65-l4t57"]
Oct 02 11:12:22 crc kubenswrapper[4783]: I1002 11:12:22.712637 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-698758b865-dj9rv"]
Oct 02 11:12:22 crc kubenswrapper[4783]: I1002 11:12:22.720932 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-dj9rv"
Oct 02 11:12:22 crc kubenswrapper[4783]: I1002 11:12:22.750351 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-698758b865-dj9rv"]
Oct 02 11:12:22 crc kubenswrapper[4783]: I1002 11:12:22.764770 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8r8wf\" (UniqueName: \"kubernetes.io/projected/965b24b4-4f8c-464b-a1ef-a51077d6c553-kube-api-access-8r8wf\") pod \"dnsmasq-dns-698758b865-dj9rv\" (UID: \"965b24b4-4f8c-464b-a1ef-a51077d6c553\") " pod="openstack/dnsmasq-dns-698758b865-dj9rv"
Oct 02 11:12:22 crc kubenswrapper[4783]: I1002 11:12:22.764879 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/965b24b4-4f8c-464b-a1ef-a51077d6c553-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-dj9rv\" (UID: \"965b24b4-4f8c-464b-a1ef-a51077d6c553\") " pod="openstack/dnsmasq-dns-698758b865-dj9rv"
Oct 02 11:12:22 crc kubenswrapper[4783]: I1002 11:12:22.764913 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/965b24b4-4f8c-464b-a1ef-a51077d6c553-dns-svc\") pod \"dnsmasq-dns-698758b865-dj9rv\" (UID: \"965b24b4-4f8c-464b-a1ef-a51077d6c553\") " pod="openstack/dnsmasq-dns-698758b865-dj9rv"
Oct 02 11:12:22 crc kubenswrapper[4783]: I1002 11:12:22.764946 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/965b24b4-4f8c-464b-a1ef-a51077d6c553-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-dj9rv\" (UID: \"965b24b4-4f8c-464b-a1ef-a51077d6c553\") " pod="openstack/dnsmasq-dns-698758b865-dj9rv"
Oct 02 11:12:22 crc kubenswrapper[4783]: I1002 11:12:22.765020 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/965b24b4-4f8c-464b-a1ef-a51077d6c553-config\") pod \"dnsmasq-dns-698758b865-dj9rv\" (UID: \"965b24b4-4f8c-464b-a1ef-a51077d6c553\") " pod="openstack/dnsmasq-dns-698758b865-dj9rv"
Oct 02 11:12:22 crc kubenswrapper[4783]: I1002 11:12:22.866530 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8r8wf\" (UniqueName: \"kubernetes.io/projected/965b24b4-4f8c-464b-a1ef-a51077d6c553-kube-api-access-8r8wf\") pod \"dnsmasq-dns-698758b865-dj9rv\" (UID: \"965b24b4-4f8c-464b-a1ef-a51077d6c553\") " pod="openstack/dnsmasq-dns-698758b865-dj9rv"
Oct 02 11:12:22 crc kubenswrapper[4783]: I1002 11:12:22.866576 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/965b24b4-4f8c-464b-a1ef-a51077d6c553-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-dj9rv\" (UID: \"965b24b4-4f8c-464b-a1ef-a51077d6c553\") " pod="openstack/dnsmasq-dns-698758b865-dj9rv"
Oct 02 11:12:22 crc kubenswrapper[4783]: I1002 11:12:22.866600 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/965b24b4-4f8c-464b-a1ef-a51077d6c553-dns-svc\") pod \"dnsmasq-dns-698758b865-dj9rv\" (UID: \"965b24b4-4f8c-464b-a1ef-a51077d6c553\") " pod="openstack/dnsmasq-dns-698758b865-dj9rv"
Oct 02 11:12:22 crc kubenswrapper[4783]: I1002 11:12:22.866627 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/965b24b4-4f8c-464b-a1ef-a51077d6c553-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-dj9rv\" (UID: \"965b24b4-4f8c-464b-a1ef-a51077d6c553\") " pod="openstack/dnsmasq-dns-698758b865-dj9rv"
Oct 02 11:12:22 crc kubenswrapper[4783]: I1002 11:12:22.866679 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/965b24b4-4f8c-464b-a1ef-a51077d6c553-config\") pod \"dnsmasq-dns-698758b865-dj9rv\" (UID: \"965b24b4-4f8c-464b-a1ef-a51077d6c553\") " pod="openstack/dnsmasq-dns-698758b865-dj9rv"
Oct 02 11:12:22 crc kubenswrapper[4783]: I1002 11:12:22.867432 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/965b24b4-4f8c-464b-a1ef-a51077d6c553-ovsdbserver-nb\") pod \"dnsmasq-dns-698758b865-dj9rv\" (UID: \"965b24b4-4f8c-464b-a1ef-a51077d6c553\") " pod="openstack/dnsmasq-dns-698758b865-dj9rv"
Oct 02 11:12:22 crc kubenswrapper[4783]: I1002 11:12:22.867529 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/965b24b4-4f8c-464b-a1ef-a51077d6c553-dns-svc\") pod \"dnsmasq-dns-698758b865-dj9rv\" (UID: \"965b24b4-4f8c-464b-a1ef-a51077d6c553\") " pod="openstack/dnsmasq-dns-698758b865-dj9rv"
Oct 02 11:12:22 crc kubenswrapper[4783]: I1002 11:12:22.868753 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/965b24b4-4f8c-464b-a1ef-a51077d6c553-ovsdbserver-sb\") pod \"dnsmasq-dns-698758b865-dj9rv\" (UID: \"965b24b4-4f8c-464b-a1ef-a51077d6c553\") " pod="openstack/dnsmasq-dns-698758b865-dj9rv"
Oct 02 11:12:22 crc kubenswrapper[4783]: I1002 11:12:22.868848 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/965b24b4-4f8c-464b-a1ef-a51077d6c553-config\") pod \"dnsmasq-dns-698758b865-dj9rv\" (UID: \"965b24b4-4f8c-464b-a1ef-a51077d6c553\") " pod="openstack/dnsmasq-dns-698758b865-dj9rv"
Oct 02 11:12:22 crc kubenswrapper[4783]: I1002 11:12:22.898702 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8r8wf\" (UniqueName: \"kubernetes.io/projected/965b24b4-4f8c-464b-a1ef-a51077d6c553-kube-api-access-8r8wf\") pod \"dnsmasq-dns-698758b865-dj9rv\" (UID: \"965b24b4-4f8c-464b-a1ef-a51077d6c553\") " pod="openstack/dnsmasq-dns-698758b865-dj9rv"
Oct 02 11:12:23 crc kubenswrapper[4783]: I1002 11:12:23.047810 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-dj9rv"
Oct 02 11:12:23 crc kubenswrapper[4783]: I1002 11:12:23.880550 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"]
Oct 02 11:12:23 crc kubenswrapper[4783]: I1002 11:12:23.900206 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Oct 02 11:12:23 crc kubenswrapper[4783]: I1002 11:12:23.905630 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-c4lbk" Oct 02 11:12:23 crc kubenswrapper[4783]: I1002 11:12:23.905957 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf" Oct 02 11:12:23 crc kubenswrapper[4783]: I1002 11:12:23.906326 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data" Oct 02 11:12:23 crc kubenswrapper[4783]: I1002 11:12:23.906471 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files" Oct 02 11:12:23 crc kubenswrapper[4783]: I1002 11:12:23.941185 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Oct 02 11:12:23 crc kubenswrapper[4783]: I1002 11:12:23.982674 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/5c831884-5efd-4048-b7df-d0edf1d51e89-cache\") pod \"swift-storage-0\" (UID: \"5c831884-5efd-4048-b7df-d0edf1d51e89\") " pod="openstack/swift-storage-0" Oct 02 11:12:23 crc kubenswrapper[4783]: I1002 11:12:23.983104 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"swift-storage-0\" (UID: \"5c831884-5efd-4048-b7df-d0edf1d51e89\") " pod="openstack/swift-storage-0" Oct 02 11:12:23 crc kubenswrapper[4783]: I1002 11:12:23.983153 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kk8rj\" (UniqueName: \"kubernetes.io/projected/5c831884-5efd-4048-b7df-d0edf1d51e89-kube-api-access-kk8rj\") pod \"swift-storage-0\" (UID: \"5c831884-5efd-4048-b7df-d0edf1d51e89\") " pod="openstack/swift-storage-0" Oct 02 11:12:23 crc kubenswrapper[4783]: I1002 11:12:23.983180 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/5c831884-5efd-4048-b7df-d0edf1d51e89-etc-swift\") pod \"swift-storage-0\" (UID: \"5c831884-5efd-4048-b7df-d0edf1d51e89\") " pod="openstack/swift-storage-0" Oct 02 11:12:23 crc kubenswrapper[4783]: I1002 11:12:23.983306 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/5c831884-5efd-4048-b7df-d0edf1d51e89-lock\") pod \"swift-storage-0\" (UID: \"5c831884-5efd-4048-b7df-d0edf1d51e89\") " pod="openstack/swift-storage-0" Oct 02 11:12:24 crc kubenswrapper[4783]: I1002 11:12:24.084998 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/5c831884-5efd-4048-b7df-d0edf1d51e89-lock\") pod \"swift-storage-0\" (UID: \"5c831884-5efd-4048-b7df-d0edf1d51e89\") " pod="openstack/swift-storage-0" Oct 02 11:12:24 crc kubenswrapper[4783]: I1002 11:12:24.085089 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/5c831884-5efd-4048-b7df-d0edf1d51e89-cache\") pod \"swift-storage-0\" (UID: \"5c831884-5efd-4048-b7df-d0edf1d51e89\") " pod="openstack/swift-storage-0" Oct 02 11:12:24 crc kubenswrapper[4783]: I1002 11:12:24.085140 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"swift-storage-0\" (UID: \"5c831884-5efd-4048-b7df-d0edf1d51e89\") " pod="openstack/swift-storage-0" Oct 02 11:12:24 crc kubenswrapper[4783]: I1002 11:12:24.085180 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kk8rj\" (UniqueName: \"kubernetes.io/projected/5c831884-5efd-4048-b7df-d0edf1d51e89-kube-api-access-kk8rj\") pod \"swift-storage-0\" (UID: \"5c831884-5efd-4048-b7df-d0edf1d51e89\") " pod="openstack/swift-storage-0" Oct 02 11:12:24 crc kubenswrapper[4783]: I1002 11:12:24.085204 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/5c831884-5efd-4048-b7df-d0edf1d51e89-etc-swift\") pod \"swift-storage-0\" (UID: \"5c831884-5efd-4048-b7df-d0edf1d51e89\") " pod="openstack/swift-storage-0" Oct 02 11:12:24 crc kubenswrapper[4783]: E1002 11:12:24.085386 4783 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Oct 02 11:12:24 crc kubenswrapper[4783]: E1002 11:12:24.085404 4783 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Oct 02 11:12:24 crc kubenswrapper[4783]: E1002 11:12:24.085478 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/5c831884-5efd-4048-b7df-d0edf1d51e89-etc-swift podName:5c831884-5efd-4048-b7df-d0edf1d51e89 nodeName:}" failed. No retries permitted until 2025-10-02 11:12:24.585456916 +0000 UTC m=+1177.901651177 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/5c831884-5efd-4048-b7df-d0edf1d51e89-etc-swift") pod "swift-storage-0" (UID: "5c831884-5efd-4048-b7df-d0edf1d51e89") : configmap "swift-ring-files" not found Oct 02 11:12:24 crc kubenswrapper[4783]: I1002 11:12:24.085698 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/5c831884-5efd-4048-b7df-d0edf1d51e89-lock\") pod \"swift-storage-0\" (UID: \"5c831884-5efd-4048-b7df-d0edf1d51e89\") " pod="openstack/swift-storage-0" Oct 02 11:12:24 crc kubenswrapper[4783]: I1002 11:12:24.085742 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/5c831884-5efd-4048-b7df-d0edf1d51e89-cache\") pod \"swift-storage-0\" (UID: \"5c831884-5efd-4048-b7df-d0edf1d51e89\") " pod="openstack/swift-storage-0" Oct 02 11:12:24 crc kubenswrapper[4783]: I1002 11:12:24.086008 4783 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"swift-storage-0\" (UID: \"5c831884-5efd-4048-b7df-d0edf1d51e89\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/swift-storage-0" Oct 02 11:12:24 crc kubenswrapper[4783]: I1002 11:12:24.116076 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kk8rj\" (UniqueName: \"kubernetes.io/projected/5c831884-5efd-4048-b7df-d0edf1d51e89-kube-api-access-kk8rj\") pod \"swift-storage-0\" (UID: \"5c831884-5efd-4048-b7df-d0edf1d51e89\") " pod="openstack/swift-storage-0" Oct 02 11:12:24 crc kubenswrapper[4783]: I1002 11:12:24.120393 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage11-crc\") pod \"swift-storage-0\" (UID: \"5c831884-5efd-4048-b7df-d0edf1d51e89\") " pod="openstack/swift-storage-0" Oct 02 11:12:24 crc kubenswrapper[4783]: I1002 11:12:24.594352 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/5c831884-5efd-4048-b7df-d0edf1d51e89-etc-swift\") pod \"swift-storage-0\" (UID: \"5c831884-5efd-4048-b7df-d0edf1d51e89\") " pod="openstack/swift-storage-0" Oct 02 11:12:24 crc kubenswrapper[4783]: E1002 11:12:24.594525 4783 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Oct 02 11:12:24 crc kubenswrapper[4783]: E1002 11:12:24.594539 4783 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Oct 02 11:12:24 crc kubenswrapper[4783]: E1002 11:12:24.594578 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/5c831884-5efd-4048-b7df-d0edf1d51e89-etc-swift podName:5c831884-5efd-4048-b7df-d0edf1d51e89 nodeName:}" failed. No retries permitted until 2025-10-02 11:12:25.594563928 +0000 UTC m=+1178.910758189 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/5c831884-5efd-4048-b7df-d0edf1d51e89-etc-swift") pod "swift-storage-0" (UID: "5c831884-5efd-4048-b7df-d0edf1d51e89") : configmap "swift-ring-files" not found Oct 02 11:12:25 crc kubenswrapper[4783]: I1002 11:12:25.611449 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/5c831884-5efd-4048-b7df-d0edf1d51e89-etc-swift\") pod \"swift-storage-0\" (UID: \"5c831884-5efd-4048-b7df-d0edf1d51e89\") " pod="openstack/swift-storage-0" Oct 02 11:12:25 crc kubenswrapper[4783]: E1002 11:12:25.611676 4783 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Oct 02 11:12:25 crc kubenswrapper[4783]: E1002 11:12:25.611708 4783 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Oct 02 11:12:25 crc kubenswrapper[4783]: E1002 11:12:25.611775 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/5c831884-5efd-4048-b7df-d0edf1d51e89-etc-swift podName:5c831884-5efd-4048-b7df-d0edf1d51e89 nodeName:}" failed. No retries permitted until 2025-10-02 11:12:27.611751685 +0000 UTC m=+1180.927946026 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/5c831884-5efd-4048-b7df-d0edf1d51e89-etc-swift") pod "swift-storage-0" (UID: "5c831884-5efd-4048-b7df-d0edf1d51e89") : configmap "swift-ring-files" not found Oct 02 11:12:26 crc kubenswrapper[4783]: I1002 11:12:26.485671 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5ccc8479f9-6gwjh" podUID="897fe4b3-a755-4281-8f7d-c64d3551f950" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.96:5353: connect: connection refused" Oct 02 11:12:27 crc kubenswrapper[4783]: I1002 11:12:27.113351 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-57d769cc4f-tmt9k" podUID="8ed908e0-5c5b-4246-9192-17d4b995650f" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.97:5353: connect: connection refused" Oct 02 11:12:27 crc kubenswrapper[4783]: I1002 11:12:27.644953 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/5c831884-5efd-4048-b7df-d0edf1d51e89-etc-swift\") pod \"swift-storage-0\" (UID: \"5c831884-5efd-4048-b7df-d0edf1d51e89\") " pod="openstack/swift-storage-0" Oct 02 11:12:27 crc kubenswrapper[4783]: E1002 11:12:27.645161 4783 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Oct 02 11:12:27 crc kubenswrapper[4783]: E1002 11:12:27.645194 4783 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Oct 02 11:12:27 crc kubenswrapper[4783]: E1002 11:12:27.645272 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/5c831884-5efd-4048-b7df-d0edf1d51e89-etc-swift podName:5c831884-5efd-4048-b7df-d0edf1d51e89 nodeName:}" failed. No retries permitted until 2025-10-02 11:12:31.645248275 +0000 UTC m=+1184.961442536 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/5c831884-5efd-4048-b7df-d0edf1d51e89-etc-swift") pod "swift-storage-0" (UID: "5c831884-5efd-4048-b7df-d0edf1d51e89") : configmap "swift-ring-files" not found Oct 02 11:12:27 crc kubenswrapper[4783]: I1002 11:12:27.870259 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-4b4vd"] Oct 02 11:12:27 crc kubenswrapper[4783]: I1002 11:12:27.872076 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-4b4vd" Oct 02 11:12:27 crc kubenswrapper[4783]: I1002 11:12:27.875011 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Oct 02 11:12:27 crc kubenswrapper[4783]: I1002 11:12:27.875063 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data" Oct 02 11:12:27 crc kubenswrapper[4783]: I1002 11:12:27.875641 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts" Oct 02 11:12:27 crc kubenswrapper[4783]: I1002 11:12:27.882550 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-4b4vd"] Oct 02 11:12:27 crc kubenswrapper[4783]: I1002 11:12:27.949942 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/0072de5e-507d-4efe-8f60-f48b9799fe72-swiftconf\") pod \"swift-ring-rebalance-4b4vd\" (UID: \"0072de5e-507d-4efe-8f60-f48b9799fe72\") " pod="openstack/swift-ring-rebalance-4b4vd" Oct 02 11:12:27 crc kubenswrapper[4783]: I1002 11:12:27.949998 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cx55x\" (UniqueName: \"kubernetes.io/projected/0072de5e-507d-4efe-8f60-f48b9799fe72-kube-api-access-cx55x\") pod \"swift-ring-rebalance-4b4vd\" (UID: \"0072de5e-507d-4efe-8f60-f48b9799fe72\") " pod="openstack/swift-ring-rebalance-4b4vd" Oct 02 11:12:27 crc kubenswrapper[4783]: I1002 11:12:27.950077 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0072de5e-507d-4efe-8f60-f48b9799fe72-scripts\") pod \"swift-ring-rebalance-4b4vd\" (UID: \"0072de5e-507d-4efe-8f60-f48b9799fe72\") " pod="openstack/swift-ring-rebalance-4b4vd" Oct 02 11:12:27 crc kubenswrapper[4783]: I1002 11:12:27.950141 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/0072de5e-507d-4efe-8f60-f48b9799fe72-dispersionconf\") pod \"swift-ring-rebalance-4b4vd\" (UID: \"0072de5e-507d-4efe-8f60-f48b9799fe72\") " pod="openstack/swift-ring-rebalance-4b4vd" Oct 02 11:12:27 crc kubenswrapper[4783]: I1002 11:12:27.950287 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/0072de5e-507d-4efe-8f60-f48b9799fe72-etc-swift\") pod \"swift-ring-rebalance-4b4vd\" (UID: \"0072de5e-507d-4efe-8f60-f48b9799fe72\") " pod="openstack/swift-ring-rebalance-4b4vd" Oct 02 11:12:27 crc kubenswrapper[4783]: I1002 11:12:27.950374 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0072de5e-507d-4efe-8f60-f48b9799fe72-combined-ca-bundle\") pod \"swift-ring-rebalance-4b4vd\" (UID: \"0072de5e-507d-4efe-8f60-f48b9799fe72\") " pod="openstack/swift-ring-rebalance-4b4vd" Oct 02 11:12:27 crc kubenswrapper[4783]: I1002 11:12:27.950490 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/0072de5e-507d-4efe-8f60-f48b9799fe72-ring-data-devices\") pod \"swift-ring-rebalance-4b4vd\" (UID: \"0072de5e-507d-4efe-8f60-f48b9799fe72\") " pod="openstack/swift-ring-rebalance-4b4vd" Oct 02 
11:12:28 crc kubenswrapper[4783]: I1002 11:12:28.053244 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/0072de5e-507d-4efe-8f60-f48b9799fe72-ring-data-devices\") pod \"swift-ring-rebalance-4b4vd\" (UID: \"0072de5e-507d-4efe-8f60-f48b9799fe72\") " pod="openstack/swift-ring-rebalance-4b4vd" Oct 02 11:12:28 crc kubenswrapper[4783]: I1002 11:12:28.053567 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/0072de5e-507d-4efe-8f60-f48b9799fe72-swiftconf\") pod \"swift-ring-rebalance-4b4vd\" (UID: \"0072de5e-507d-4efe-8f60-f48b9799fe72\") " pod="openstack/swift-ring-rebalance-4b4vd" Oct 02 11:12:28 crc kubenswrapper[4783]: I1002 11:12:28.053715 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cx55x\" (UniqueName: \"kubernetes.io/projected/0072de5e-507d-4efe-8f60-f48b9799fe72-kube-api-access-cx55x\") pod \"swift-ring-rebalance-4b4vd\" (UID: \"0072de5e-507d-4efe-8f60-f48b9799fe72\") " pod="openstack/swift-ring-rebalance-4b4vd" Oct 02 11:12:28 crc kubenswrapper[4783]: I1002 11:12:28.054087 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0072de5e-507d-4efe-8f60-f48b9799fe72-scripts\") pod \"swift-ring-rebalance-4b4vd\" (UID: \"0072de5e-507d-4efe-8f60-f48b9799fe72\") " pod="openstack/swift-ring-rebalance-4b4vd" Oct 02 11:12:28 crc kubenswrapper[4783]: I1002 11:12:28.054472 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/0072de5e-507d-4efe-8f60-f48b9799fe72-dispersionconf\") pod \"swift-ring-rebalance-4b4vd\" (UID: \"0072de5e-507d-4efe-8f60-f48b9799fe72\") " pod="openstack/swift-ring-rebalance-4b4vd" Oct 02 11:12:28 crc kubenswrapper[4783]: I1002 11:12:28.054627 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/0072de5e-507d-4efe-8f60-f48b9799fe72-ring-data-devices\") pod \"swift-ring-rebalance-4b4vd\" (UID: \"0072de5e-507d-4efe-8f60-f48b9799fe72\") " pod="openstack/swift-ring-rebalance-4b4vd" Oct 02 11:12:28 crc kubenswrapper[4783]: I1002 11:12:28.054638 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/0072de5e-507d-4efe-8f60-f48b9799fe72-etc-swift\") pod \"swift-ring-rebalance-4b4vd\" (UID: \"0072de5e-507d-4efe-8f60-f48b9799fe72\") " pod="openstack/swift-ring-rebalance-4b4vd" Oct 02 11:12:28 crc kubenswrapper[4783]: I1002 11:12:28.054867 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0072de5e-507d-4efe-8f60-f48b9799fe72-combined-ca-bundle\") pod \"swift-ring-rebalance-4b4vd\" (UID: \"0072de5e-507d-4efe-8f60-f48b9799fe72\") " pod="openstack/swift-ring-rebalance-4b4vd" Oct 02 11:12:28 crc kubenswrapper[4783]: I1002 11:12:28.055112 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/0072de5e-507d-4efe-8f60-f48b9799fe72-etc-swift\") pod \"swift-ring-rebalance-4b4vd\" (UID: \"0072de5e-507d-4efe-8f60-f48b9799fe72\") " pod="openstack/swift-ring-rebalance-4b4vd" Oct 02 11:12:28 crc kubenswrapper[4783]: I1002 11:12:28.054756 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0072de5e-507d-4efe-8f60-f48b9799fe72-scripts\") pod \"swift-ring-rebalance-4b4vd\" (UID: \"0072de5e-507d-4efe-8f60-f48b9799fe72\") " pod="openstack/swift-ring-rebalance-4b4vd" Oct 02 11:12:28 crc kubenswrapper[4783]: I1002 11:12:28.063598 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0072de5e-507d-4efe-8f60-f48b9799fe72-combined-ca-bundle\") pod \"swift-ring-rebalance-4b4vd\" (UID: \"0072de5e-507d-4efe-8f60-f48b9799fe72\") " pod="openstack/swift-ring-rebalance-4b4vd" Oct 02 11:12:28 crc kubenswrapper[4783]: I1002 11:12:28.064154 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/0072de5e-507d-4efe-8f60-f48b9799fe72-swiftconf\") pod \"swift-ring-rebalance-4b4vd\" (UID: \"0072de5e-507d-4efe-8f60-f48b9799fe72\") " pod="openstack/swift-ring-rebalance-4b4vd" Oct 02 11:12:28 crc kubenswrapper[4783]: I1002 11:12:28.064528 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/0072de5e-507d-4efe-8f60-f48b9799fe72-dispersionconf\") pod \"swift-ring-rebalance-4b4vd\" (UID: \"0072de5e-507d-4efe-8f60-f48b9799fe72\") " pod="openstack/swift-ring-rebalance-4b4vd" Oct 02 11:12:28 crc kubenswrapper[4783]: I1002 11:12:28.083691 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cx55x\" (UniqueName: \"kubernetes.io/projected/0072de5e-507d-4efe-8f60-f48b9799fe72-kube-api-access-cx55x\") pod \"swift-ring-rebalance-4b4vd\" (UID: \"0072de5e-507d-4efe-8f60-f48b9799fe72\") " pod="openstack/swift-ring-rebalance-4b4vd" Oct 02 11:12:28 crc kubenswrapper[4783]: I1002 11:12:28.199643 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-4b4vd" Oct 02 11:12:31 crc kubenswrapper[4783]: I1002 11:12:31.485631 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5ccc8479f9-6gwjh" podUID="897fe4b3-a755-4281-8f7d-c64d3551f950" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.96:5353: connect: connection refused" Oct 02 11:12:31 crc kubenswrapper[4783]: I1002 11:12:31.721586 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/5c831884-5efd-4048-b7df-d0edf1d51e89-etc-swift\") pod \"swift-storage-0\" (UID: \"5c831884-5efd-4048-b7df-d0edf1d51e89\") " pod="openstack/swift-storage-0" Oct 02 11:12:31 crc kubenswrapper[4783]: E1002 11:12:31.721812 4783 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Oct 02 11:12:31 crc kubenswrapper[4783]: E1002 11:12:31.721983 4783 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Oct 02 11:12:31 crc kubenswrapper[4783]: E1002 11:12:31.722091 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/5c831884-5efd-4048-b7df-d0edf1d51e89-etc-swift podName:5c831884-5efd-4048-b7df-d0edf1d51e89 nodeName:}" failed. No retries permitted until 2025-10-02 11:12:39.722056783 +0000 UTC m=+1193.038251084 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/5c831884-5efd-4048-b7df-d0edf1d51e89-etc-swift") pod "swift-storage-0" (UID: "5c831884-5efd-4048-b7df-d0edf1d51e89") : configmap "swift-ring-files" not found Oct 02 11:12:32 crc kubenswrapper[4783]: I1002 11:12:32.113459 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-57d769cc4f-tmt9k" podUID="8ed908e0-5c5b-4246-9192-17d4b995650f" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.97:5353: connect: connection refused" Oct 02 11:12:35 crc kubenswrapper[4783]: E1002 11:12:35.021264 4783 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified" Oct 02 11:12:35 crc kubenswrapper[4783]: E1002 11:12:35.022279 4783 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:openstack-network-exporter,Image:quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified,Command:[/app/openstack-network-exporter],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:OPENSTACK_NETWORK_EXPORTER_YAML,Value:/etc/config/openstack-network-exporter.yaml,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:ovsdb-rundir,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:metrics-certs-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovnmetrics.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:metrics-certs-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/private/ovnmetrics.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:metrics-certs-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndbca.crt,SubPath:ca.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-xjwf5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovsdbserver-nb-0_openstack(627209ce-c546-42e5-b35e-ab8abd950ef8): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Oct 02 11:12:35 crc kubenswrapper[4783]: E1002 11:12:35.029011 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openstack-network-exporter\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ovsdbserver-nb-0" 
podUID="627209ce-c546-42e5-b35e-ab8abd950ef8" Oct 02 11:12:35 crc kubenswrapper[4783]: I1002 11:12:35.201373 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-tmt9k" Oct 02 11:12:35 crc kubenswrapper[4783]: E1002 11:12:35.255440 4783 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified" Oct 02 11:12:35 crc kubenswrapper[4783]: E1002 11:12:35.255576 4783 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:openstack-network-exporter,Image:quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified,Command:[/app/openstack-network-exporter],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:OPENSTACK_NETWORK_EXPORTER_YAML,Value:/etc/config/openstack-network-exporter.yaml,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:ovsdb-rundir,ReadOnly:false,MountPath:/tmp,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:metrics-certs-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovnmetrics.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:metrics-certs-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/private/ovnmetrics.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:metrics-certs-tls-certs,ReadOnly:true,MountPath:/etc/pki/tls/certs/ovndbca.crt,SubPath:ca.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-69vhd,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovsdbserver-sb-0_openstack(f0a8aca8-fc00-4c0a-82b5-63fc50672f72): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Oct 02 11:12:35 crc kubenswrapper[4783]: E1002 11:12:35.256989 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openstack-network-exporter\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ovsdbserver-sb-0" podUID="f0a8aca8-fc00-4c0a-82b5-63fc50672f72" Oct 02 11:12:35 crc kubenswrapper[4783]: I1002 11:12:35.276294 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5ccc8479f9-6gwjh" Oct 02 11:12:35 crc kubenswrapper[4783]: I1002 11:12:35.282172 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8ed908e0-5c5b-4246-9192-17d4b995650f-dns-svc\") pod \"8ed908e0-5c5b-4246-9192-17d4b995650f\" (UID: \"8ed908e0-5c5b-4246-9192-17d4b995650f\") " Oct 02 11:12:35 crc kubenswrapper[4783]: I1002 11:12:35.282423 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f9vtn\" (UniqueName: \"kubernetes.io/projected/8ed908e0-5c5b-4246-9192-17d4b995650f-kube-api-access-f9vtn\") pod \"8ed908e0-5c5b-4246-9192-17d4b995650f\" (UID: \"8ed908e0-5c5b-4246-9192-17d4b995650f\") " Oct 02 11:12:35 crc kubenswrapper[4783]: I1002 11:12:35.282447 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8ed908e0-5c5b-4246-9192-17d4b995650f-config\") pod \"8ed908e0-5c5b-4246-9192-17d4b995650f\" (UID: \"8ed908e0-5c5b-4246-9192-17d4b995650f\") " Oct 02 11:12:35 crc kubenswrapper[4783]: I1002 11:12:35.293621 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8ed908e0-5c5b-4246-9192-17d4b995650f-kube-api-access-f9vtn" (OuterVolumeSpecName: "kube-api-access-f9vtn") pod "8ed908e0-5c5b-4246-9192-17d4b995650f" (UID: "8ed908e0-5c5b-4246-9192-17d4b995650f"). InnerVolumeSpecName "kube-api-access-f9vtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:12:35 crc kubenswrapper[4783]: I1002 11:12:35.364892 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8ed908e0-5c5b-4246-9192-17d4b995650f-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "8ed908e0-5c5b-4246-9192-17d4b995650f" (UID: "8ed908e0-5c5b-4246-9192-17d4b995650f"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:12:35 crc kubenswrapper[4783]: I1002 11:12:35.365918 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8ed908e0-5c5b-4246-9192-17d4b995650f-config" (OuterVolumeSpecName: "config") pod "8ed908e0-5c5b-4246-9192-17d4b995650f" (UID: "8ed908e0-5c5b-4246-9192-17d4b995650f"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:12:35 crc kubenswrapper[4783]: I1002 11:12:35.383765 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/897fe4b3-a755-4281-8f7d-c64d3551f950-dns-svc\") pod \"897fe4b3-a755-4281-8f7d-c64d3551f950\" (UID: \"897fe4b3-a755-4281-8f7d-c64d3551f950\") " Oct 02 11:12:35 crc kubenswrapper[4783]: I1002 11:12:35.383906 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/897fe4b3-a755-4281-8f7d-c64d3551f950-config\") pod \"897fe4b3-a755-4281-8f7d-c64d3551f950\" (UID: \"897fe4b3-a755-4281-8f7d-c64d3551f950\") " Oct 02 11:12:35 crc kubenswrapper[4783]: I1002 11:12:35.384045 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6kn4l\" (UniqueName: \"kubernetes.io/projected/897fe4b3-a755-4281-8f7d-c64d3551f950-kube-api-access-6kn4l\") pod \"897fe4b3-a755-4281-8f7d-c64d3551f950\" (UID: \"897fe4b3-a755-4281-8f7d-c64d3551f950\") " Oct 02 11:12:35 crc kubenswrapper[4783]: I1002 11:12:35.384526 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f9vtn\" (UniqueName: \"kubernetes.io/projected/8ed908e0-5c5b-4246-9192-17d4b995650f-kube-api-access-f9vtn\") on node \"crc\" DevicePath \"\"" Oct 02 11:12:35 crc kubenswrapper[4783]: I1002 11:12:35.384548 4783 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8ed908e0-5c5b-4246-9192-17d4b995650f-config\") on node \"crc\" DevicePath \"\"" Oct 02 11:12:35 crc kubenswrapper[4783]: I1002 11:12:35.384560 4783 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/8ed908e0-5c5b-4246-9192-17d4b995650f-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 02 11:12:35 crc kubenswrapper[4783]: I1002 11:12:35.393601 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/897fe4b3-a755-4281-8f7d-c64d3551f950-kube-api-access-6kn4l" (OuterVolumeSpecName: "kube-api-access-6kn4l") pod "897fe4b3-a755-4281-8f7d-c64d3551f950" (UID: "897fe4b3-a755-4281-8f7d-c64d3551f950"). InnerVolumeSpecName "kube-api-access-6kn4l". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:12:35 crc kubenswrapper[4783]: I1002 11:12:35.423701 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/897fe4b3-a755-4281-8f7d-c64d3551f950-config" (OuterVolumeSpecName: "config") pod "897fe4b3-a755-4281-8f7d-c64d3551f950" (UID: "897fe4b3-a755-4281-8f7d-c64d3551f950"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:12:35 crc kubenswrapper[4783]: I1002 11:12:35.444114 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-r7hrq"] Oct 02 11:12:35 crc kubenswrapper[4783]: I1002 11:12:35.448274 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-57d769cc4f-tmt9k" event={"ID":"8ed908e0-5c5b-4246-9192-17d4b995650f","Type":"ContainerDied","Data":"bd580ef726b45584f0e7e945cac141bf3a8e384b9bee9de0d3b596855333b260"} Oct 02 11:12:35 crc kubenswrapper[4783]: I1002 11:12:35.448333 4783 scope.go:117] "RemoveContainer" containerID="5df9875dd2aaab63ea083b19c3a6086fa9c3fb0a508e57aeac7e6d9f9f05fce3" Oct 02 11:12:35 crc kubenswrapper[4783]: I1002 11:12:35.448609 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-57d769cc4f-tmt9k" Oct 02 11:12:35 crc kubenswrapper[4783]: I1002 11:12:35.455535 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5ccc8479f9-6gwjh" event={"ID":"897fe4b3-a755-4281-8f7d-c64d3551f950","Type":"ContainerDied","Data":"08223a1c44f187fc6f1697c463d50898995d33c1665f078884ae7f59d9621608"} Oct 02 11:12:35 crc kubenswrapper[4783]: I1002 11:12:35.459746 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5ccc8479f9-6gwjh" Oct 02 11:12:35 crc kubenswrapper[4783]: E1002 11:12:35.462774 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openstack-network-exporter\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified\\\"\"" pod="openstack/ovsdbserver-nb-0" podUID="627209ce-c546-42e5-b35e-ab8abd950ef8" Oct 02 11:12:35 crc kubenswrapper[4783]: I1002 11:12:35.468591 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7f896c8c65-l4t57"] Oct 02 11:12:35 crc kubenswrapper[4783]: E1002 11:12:35.472196 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openstack-network-exporter\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/openstack-network-exporter:current-podified\\\"\"" pod="openstack/ovsdbserver-sb-0" podUID="f0a8aca8-fc00-4c0a-82b5-63fc50672f72" Oct 02 11:12:35 crc kubenswrapper[4783]: I1002 11:12:35.473861 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-pc22j"] Oct 02 11:12:35 crc kubenswrapper[4783]: W1002 11:12:35.477503 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8040f1be_17e4_4424_942a_b7c2bcb55b0b.slice/crio-61cd5a69c7045d4faaeb9ffe586c4bcb47e04a26d008ca2b8ddc51dcba0022a1 WatchSource:0}: Error finding container 61cd5a69c7045d4faaeb9ffe586c4bcb47e04a26d008ca2b8ddc51dcba0022a1: Status 404 returned error can't find the container with id 61cd5a69c7045d4faaeb9ffe586c4bcb47e04a26d008ca2b8ddc51dcba0022a1 Oct 02 11:12:35 crc kubenswrapper[4783]: I1002 11:12:35.485646 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/897fe4b3-a755-4281-8f7d-c64d3551f950-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "897fe4b3-a755-4281-8f7d-c64d3551f950" (UID: "897fe4b3-a755-4281-8f7d-c64d3551f950"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:12:35 crc kubenswrapper[4783]: I1002 11:12:35.486138 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6kn4l\" (UniqueName: \"kubernetes.io/projected/897fe4b3-a755-4281-8f7d-c64d3551f950-kube-api-access-6kn4l\") on node \"crc\" DevicePath \"\"" Oct 02 11:12:35 crc kubenswrapper[4783]: I1002 11:12:35.486180 4783 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/897fe4b3-a755-4281-8f7d-c64d3551f950-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 02 11:12:35 crc kubenswrapper[4783]: I1002 11:12:35.486191 4783 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/897fe4b3-a755-4281-8f7d-c64d3551f950-config\") on node \"crc\" DevicePath \"\"" Oct 02 11:12:35 crc kubenswrapper[4783]: I1002 11:12:35.504527 4783 scope.go:117] "RemoveContainer" containerID="4d0c4dbce27e8dcf5e2e98a9b41b49b3271b2be9c76353bc30a970257dda91aa" Oct 02 11:12:35 crc kubenswrapper[4783]: I1002 11:12:35.549024 4783 scope.go:117] "RemoveContainer" containerID="75c7db1e94be9b5b9045a63bf0e0402deb11d072b8a3ff603a8a0df389be04f8" Oct 02 11:12:35 crc kubenswrapper[4783]: I1002 11:12:35.580063 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-tmt9k"] Oct 02 11:12:35 crc kubenswrapper[4783]: I1002 11:12:35.580103 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-57d769cc4f-tmt9k"] Oct 02 11:12:35 crc kubenswrapper[4783]: I1002 11:12:35.580756 4783 scope.go:117] "RemoveContainer" containerID="2880c347084ccb3fd81d81004bb9fe5593eff10192157cca84d8c357a8841a49" Oct 02 11:12:35 crc kubenswrapper[4783]: I1002 11:12:35.649446 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-4b4vd"] Oct 02 11:12:35 crc kubenswrapper[4783]: I1002 11:12:35.671009 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-698758b865-dj9rv"] Oct 02 11:12:35 crc kubenswrapper[4783]: I1002 11:12:35.875360 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-6gwjh"] Oct 02 11:12:35 crc kubenswrapper[4783]: I1002 11:12:35.879900 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5ccc8479f9-6gwjh"] Oct 02 11:12:36 crc kubenswrapper[4783]: I1002 11:12:36.468441 4783 generic.go:334] "Generic (PLEG): container finished" podID="7d318b7e-ab49-4930-b4d0-49a84ea8a71a" containerID="16ae80fcb7cc46b1d02c66e78777fb52fb5d23f067fb092afec307bd0d8d1fa3" exitCode=0 Oct 02 11:12:36 crc kubenswrapper[4783]: I1002 11:12:36.468540 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f896c8c65-l4t57" event={"ID":"7d318b7e-ab49-4930-b4d0-49a84ea8a71a","Type":"ContainerDied","Data":"16ae80fcb7cc46b1d02c66e78777fb52fb5d23f067fb092afec307bd0d8d1fa3"} Oct 02 11:12:36 crc kubenswrapper[4783]: I1002 11:12:36.468649 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f896c8c65-l4t57" event={"ID":"7d318b7e-ab49-4930-b4d0-49a84ea8a71a","Type":"ContainerStarted","Data":"3c798d9dd8832519bc37d85eae30f53f880ab1a359782504557d7047041af368"} Oct 02 11:12:36 crc kubenswrapper[4783]: I1002 11:12:36.471245 4783 generic.go:334] "Generic (PLEG): container finished" podID="eaf0e1d9-d343-4049-9725-a689da14aaa8" containerID="4fa6af69157e015ff5cca42eab1f18f90d08255451ca2c12b0c5b41ed4a18792" exitCode=0 Oct 02 11:12:36 crc kubenswrapper[4783]: I1002 
11:12:36.471286 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-r7hrq" event={"ID":"eaf0e1d9-d343-4049-9725-a689da14aaa8","Type":"ContainerDied","Data":"4fa6af69157e015ff5cca42eab1f18f90d08255451ca2c12b0c5b41ed4a18792"} Oct 02 11:12:36 crc kubenswrapper[4783]: I1002 11:12:36.471323 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-r7hrq" event={"ID":"eaf0e1d9-d343-4049-9725-a689da14aaa8","Type":"ContainerStarted","Data":"35a0cc2b15a3dc13f6d1ac16734ac6282690f70271446db15142eed8ac45536a"} Oct 02 11:12:36 crc kubenswrapper[4783]: I1002 11:12:36.477941 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-pc22j" event={"ID":"8040f1be-17e4-4424-942a-b7c2bcb55b0b","Type":"ContainerStarted","Data":"61cd5a69c7045d4faaeb9ffe586c4bcb47e04a26d008ca2b8ddc51dcba0022a1"} Oct 02 11:12:36 crc kubenswrapper[4783]: I1002 11:12:36.480816 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-4b4vd" event={"ID":"0072de5e-507d-4efe-8f60-f48b9799fe72","Type":"ContainerStarted","Data":"217da11ea8acdc1a7d4c70ce3549891ab00501c58c94104a18f4215dcef7695a"} Oct 02 11:12:36 crc kubenswrapper[4783]: I1002 11:12:36.500463 4783 generic.go:334] "Generic (PLEG): container finished" podID="965b24b4-4f8c-464b-a1ef-a51077d6c553" containerID="a491ac10e653f3630e7b804cb28e021907cc6f48e19506894e4e1729c6c15ec7" exitCode=0 Oct 02 11:12:36 crc kubenswrapper[4783]: I1002 11:12:36.500515 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-dj9rv" event={"ID":"965b24b4-4f8c-464b-a1ef-a51077d6c553","Type":"ContainerDied","Data":"a491ac10e653f3630e7b804cb28e021907cc6f48e19506894e4e1729c6c15ec7"} Oct 02 11:12:36 crc kubenswrapper[4783]: I1002 11:12:36.500545 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-dj9rv" event={"ID":"965b24b4-4f8c-464b-a1ef-a51077d6c553","Type":"ContainerStarted","Data":"af83fd00e6d6e5e2cf3e7335f46ac6cbeb5951f51e8d6ccd788e017a396ff9a1"} Oct 02 11:12:37 crc kubenswrapper[4783]: E1002 11:12:37.316570 4783 log.go:32] "CreateContainer in sandbox from runtime service failed" err=< Oct 02 11:12:37 crc kubenswrapper[4783]: rpc error: code = Unknown desc = container create failed: mount `/var/lib/kubelet/pods/eaf0e1d9-d343-4049-9725-a689da14aaa8/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Oct 02 11:12:37 crc kubenswrapper[4783]: > podSandboxID="35a0cc2b15a3dc13f6d1ac16734ac6282690f70271446db15142eed8ac45536a" Oct 02 11:12:37 crc kubenswrapper[4783]: E1002 11:12:37.317126 4783 kuberuntime_manager.go:1274] "Unhandled Error" err=< Oct 02 11:12:37 crc kubenswrapper[4783]: container &Container{Name:dnsmasq-dns,Image:quay.io/podified-antelope-centos9/openstack-neutron-server:current-podified,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv 
--log-queries],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n599h5cbh7ch5d4h66fh676hdbh546h95h88h5ffh55ch7fhch57ch687hddhc7h5fdh57dh674h56fh64ch98h9bh557h55dh646h54ch54fh5c4h597q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-nb,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/ovsdbserver-nb,SubPath:ovsdbserver-nb,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:ovsdbserver-sb,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/ovsdbserver-sb,SubPath:ovsdbserver-sb,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-nx2pm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 },Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 5353 },Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-86db49b7ff-r7hrq_openstack(eaf0e1d9-d343-4049-9725-a689da14aaa8): CreateContainerError: container create failed: mount `/var/lib/kubelet/pods/eaf0e1d9-d343-4049-9725-a689da14aaa8/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory Oct 02 11:12:37 crc kubenswrapper[4783]: > logger="UnhandledError" Oct 02 11:12:37 crc kubenswrapper[4783]: E1002 11:12:37.318581 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"dnsmasq-dns\" with CreateContainerError: \"container create failed: mount `/var/lib/kubelet/pods/eaf0e1d9-d343-4049-9725-a689da14aaa8/volume-subpaths/dns-svc/dnsmasq-dns/1` to `etc/dnsmasq.d/hosts/dns-svc`: No such file or directory\\n\"" pod="openstack/dnsmasq-dns-86db49b7ff-r7hrq" podUID="eaf0e1d9-d343-4049-9725-a689da14aaa8" Oct 02 11:12:37 crc kubenswrapper[4783]: I1002 11:12:37.421673 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
Oct 02 11:12:37 crc kubenswrapper[4783]: I1002 11:12:37.421673 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0"
Oct 02 11:12:37 crc kubenswrapper[4783]: I1002 11:12:37.477390 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0"
Oct 02 11:12:37 crc kubenswrapper[4783]: I1002 11:12:37.534706 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-dj9rv" event={"ID":"965b24b4-4f8c-464b-a1ef-a51077d6c553","Type":"ContainerStarted","Data":"4779511c664e628477f2484c1056f3e0dc0884f821699adf6e92e5a3f491c0ee"}
Oct 02 11:12:37 crc kubenswrapper[4783]: I1002 11:12:37.534938 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-698758b865-dj9rv"
Oct 02 11:12:37 crc kubenswrapper[4783]: I1002 11:12:37.539160 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7f896c8c65-l4t57" event={"ID":"7d318b7e-ab49-4930-b4d0-49a84ea8a71a","Type":"ContainerDied","Data":"3c798d9dd8832519bc37d85eae30f53f880ab1a359782504557d7047041af368"}
Oct 02 11:12:37 crc kubenswrapper[4783]: I1002 11:12:37.539193 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3c798d9dd8832519bc37d85eae30f53f880ab1a359782504557d7047041af368"
Oct 02 11:12:37 crc kubenswrapper[4783]: I1002 11:12:37.550598 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7f896c8c65-l4t57"
Oct 02 11:12:37 crc kubenswrapper[4783]: I1002 11:12:37.554513 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-698758b865-dj9rv" podStartSLOduration=15.554498438 podStartE2EDuration="15.554498438s" podCreationTimestamp="2025-10-02 11:12:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:12:37.550963602 +0000 UTC m=+1190.867157883" watchObservedRunningTime="2025-10-02 11:12:37.554498438 +0000 UTC m=+1190.870692699"
Oct 02 11:12:37 crc kubenswrapper[4783]: I1002 11:12:37.566913 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="897fe4b3-a755-4281-8f7d-c64d3551f950" path="/var/lib/kubelet/pods/897fe4b3-a755-4281-8f7d-c64d3551f950/volumes"
Oct 02 11:12:37 crc kubenswrapper[4783]: I1002 11:12:37.567669 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8ed908e0-5c5b-4246-9192-17d4b995650f" path="/var/lib/kubelet/pods/8ed908e0-5c5b-4246-9192-17d4b995650f/volumes"
Oct 02 11:12:37 crc kubenswrapper[4783]: I1002 11:12:37.743031 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7d318b7e-ab49-4930-b4d0-49a84ea8a71a-dns-svc\") pod \"7d318b7e-ab49-4930-b4d0-49a84ea8a71a\" (UID: \"7d318b7e-ab49-4930-b4d0-49a84ea8a71a\") "
Oct 02 11:12:37 crc kubenswrapper[4783]: I1002 11:12:37.743111 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-24gtc\" (UniqueName: \"kubernetes.io/projected/7d318b7e-ab49-4930-b4d0-49a84ea8a71a-kube-api-access-24gtc\") pod \"7d318b7e-ab49-4930-b4d0-49a84ea8a71a\" (UID: \"7d318b7e-ab49-4930-b4d0-49a84ea8a71a\") "
Oct 02 11:12:37 crc kubenswrapper[4783]: I1002 11:12:37.743182 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7d318b7e-ab49-4930-b4d0-49a84ea8a71a-ovsdbserver-sb\") pod \"7d318b7e-ab49-4930-b4d0-49a84ea8a71a\" (UID: \"7d318b7e-ab49-4930-b4d0-49a84ea8a71a\") "
Oct 02 11:12:37 crc kubenswrapper[4783]: I1002 11:12:37.743260 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d318b7e-ab49-4930-b4d0-49a84ea8a71a-config\") pod \"7d318b7e-ab49-4930-b4d0-49a84ea8a71a\" (UID: \"7d318b7e-ab49-4930-b4d0-49a84ea8a71a\") "
Oct 02 11:12:37 crc kubenswrapper[4783]: I1002 11:12:37.746346 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7d318b7e-ab49-4930-b4d0-49a84ea8a71a-kube-api-access-24gtc" (OuterVolumeSpecName: "kube-api-access-24gtc") pod "7d318b7e-ab49-4930-b4d0-49a84ea8a71a" (UID: "7d318b7e-ab49-4930-b4d0-49a84ea8a71a"). InnerVolumeSpecName "kube-api-access-24gtc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 02 11:12:37 crc kubenswrapper[4783]: I1002 11:12:37.764779 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7d318b7e-ab49-4930-b4d0-49a84ea8a71a-config" (OuterVolumeSpecName: "config") pod "7d318b7e-ab49-4930-b4d0-49a84ea8a71a" (UID: "7d318b7e-ab49-4930-b4d0-49a84ea8a71a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 02 11:12:37 crc kubenswrapper[4783]: I1002 11:12:37.765705 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7d318b7e-ab49-4930-b4d0-49a84ea8a71a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "7d318b7e-ab49-4930-b4d0-49a84ea8a71a" (UID: "7d318b7e-ab49-4930-b4d0-49a84ea8a71a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 02 11:12:37 crc kubenswrapper[4783]: I1002 11:12:37.768548 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7d318b7e-ab49-4930-b4d0-49a84ea8a71a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "7d318b7e-ab49-4930-b4d0-49a84ea8a71a" (UID: "7d318b7e-ab49-4930-b4d0-49a84ea8a71a"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 02 11:12:37 crc kubenswrapper[4783]: I1002 11:12:37.844867 4783 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7d318b7e-ab49-4930-b4d0-49a84ea8a71a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Oct 02 11:12:37 crc kubenswrapper[4783]: I1002 11:12:37.845215 4783 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d318b7e-ab49-4930-b4d0-49a84ea8a71a-config\") on node \"crc\" DevicePath \"\""
Oct 02 11:12:37 crc kubenswrapper[4783]: I1002 11:12:37.845227 4783 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7d318b7e-ab49-4930-b4d0-49a84ea8a71a-dns-svc\") on node \"crc\" DevicePath \"\""
Oct 02 11:12:37 crc kubenswrapper[4783]: I1002 11:12:37.845236 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-24gtc\" (UniqueName: \"kubernetes.io/projected/7d318b7e-ab49-4930-b4d0-49a84ea8a71a-kube-api-access-24gtc\") on node \"crc\" DevicePath \"\""
Oct 02 11:12:38 crc kubenswrapper[4783]: I1002 11:12:38.336156 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0"
Oct 02 11:12:38 crc kubenswrapper[4783]: I1002 11:12:38.381845 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0"
Oct 02 11:12:38 crc kubenswrapper[4783]: I1002 11:12:38.422051 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0"
Oct 02 11:12:38 crc kubenswrapper[4783]: I1002 11:12:38.460591 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0"
Oct 02 11:12:38 crc kubenswrapper[4783]: I1002 11:12:38.548932 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-pc22j" event={"ID":"8040f1be-17e4-4424-942a-b7c2bcb55b0b","Type":"ContainerStarted","Data":"249382e7d0582a28906adf4cd5ec72d254226ebd487c633cf008356d95e22555"}
Oct 02 11:12:38 crc kubenswrapper[4783]: I1002 11:12:38.552162 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"f0a8aca8-fc00-4c0a-82b5-63fc50672f72","Type":"ContainerStarted","Data":"d5639fc5e6e0cb81aba666a384f9af2185f34e318ec96821861cc65ed6313b07"}
Oct 02 11:12:38 crc kubenswrapper[4783]: I1002 11:12:38.559868 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-r7hrq" event={"ID":"eaf0e1d9-d343-4049-9725-a689da14aaa8","Type":"ContainerStarted","Data":"87f28d796b95cfd9aedc9faf640a37c5712f10ce99d9d329ef4dca182ca3da26"}
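The startup/readiness flips above come out of the kubelet's probe workers. For reference, the probes in the container spec dumped earlier are plain TCP connect checks against port 5353; a sketch of those two probes using the upstream types (numbers copied from the dump, assuming a recent k8s.io/api where ProbeHandler is the embedded handler struct):

    package main

    import (
    	"fmt"

    	corev1 "k8s.io/api/core/v1"
    	"k8s.io/apimachinery/pkg/util/intstr"
    )

    func main() {
    	// Liveness and readiness probes as dumped in the dnsmasq-dns spec:
    	// both are TCP connect checks against port 5353.
    	liveness := &corev1.Probe{
    		ProbeHandler:        corev1.ProbeHandler{TCPSocket: &corev1.TCPSocketAction{Port: intstr.FromInt(5353)}},
    		InitialDelaySeconds: 3,
    		TimeoutSeconds:      5,
    		PeriodSeconds:       3,
    		SuccessThreshold:    1,
    		FailureThreshold:    3,
    	}
    	readiness := &corev1.Probe{
    		ProbeHandler:        corev1.ProbeHandler{TCPSocket: &corev1.TCPSocketAction{Port: intstr.FromInt(5353)}},
    		InitialDelaySeconds: 5,
    		TimeoutSeconds:      5,
    		PeriodSeconds:       5,
    		SuccessThreshold:    1,
    		FailureThreshold:    3,
    	}
    	fmt.Println(liveness.TCPSocket.Port.IntValue(), readiness.PeriodSeconds)
    }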
Oct 02 11:12:38 crc kubenswrapper[4783]: I1002 11:12:38.559912 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7f896c8c65-l4t57"
Oct 02 11:12:38 crc kubenswrapper[4783]: I1002 11:12:38.575062 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-pc22j" podStartSLOduration=21.690882831 podStartE2EDuration="23.575044207s" podCreationTimestamp="2025-10-02 11:12:15 +0000 UTC" firstStartedPulling="2025-10-02 11:12:35.505038484 +0000 UTC m=+1188.821232745" lastFinishedPulling="2025-10-02 11:12:37.38919986 +0000 UTC m=+1190.705394121" observedRunningTime="2025-10-02 11:12:38.562735622 +0000 UTC m=+1191.878929873" watchObservedRunningTime="2025-10-02 11:12:38.575044207 +0000 UTC m=+1191.891238468"
Oct 02 11:12:38 crc kubenswrapper[4783]: I1002 11:12:38.594456 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-86db49b7ff-r7hrq" podStartSLOduration=23.594442583 podStartE2EDuration="23.594442583s" podCreationTimestamp="2025-10-02 11:12:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:12:38.59397192 +0000 UTC m=+1191.910166181" watchObservedRunningTime="2025-10-02 11:12:38.594442583 +0000 UTC m=+1191.910636834"
Oct 02 11:12:38 crc kubenswrapper[4783]: I1002 11:12:38.646389 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=88.684361526 podStartE2EDuration="1m41.646367353s" podCreationTimestamp="2025-10-02 11:10:57 +0000 UTC" firstStartedPulling="2025-10-02 11:11:56.642955254 +0000 UTC m=+1149.959149525" lastFinishedPulling="2025-10-02 11:12:09.604961071 +0000 UTC m=+1162.921155352" observedRunningTime="2025-10-02 11:12:38.618775334 +0000 UTC m=+1191.934969595" watchObservedRunningTime="2025-10-02 11:12:38.646367353 +0000 UTC m=+1191.962561614"
Oct 02 11:12:38 crc kubenswrapper[4783]: I1002 11:12:38.658385 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7f896c8c65-l4t57"]
Oct 02 11:12:38 crc kubenswrapper[4783]: I1002 11:12:38.675272 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7f896c8c65-l4t57"]
Oct 02 11:12:39 crc kubenswrapper[4783]: I1002 11:12:39.335917 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0"
Oct 02 11:12:39 crc kubenswrapper[4783]: I1002 11:12:39.387314 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0"
Oct 02 11:12:39 crc kubenswrapper[4783]: I1002 11:12:39.554870 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7d318b7e-ab49-4930-b4d0-49a84ea8a71a" path="/var/lib/kubelet/pods/7d318b7e-ab49-4930-b4d0-49a84ea8a71a/volumes"
Oct 02 11:12:39 crc kubenswrapper[4783]: I1002 11:12:39.567955 4783 generic.go:334] "Generic (PLEG): container finished" podID="8c174112-c7e3-43b2-b794-a085b9565b90" containerID="651f62e8a2e598e5c700f4817aa201515dc4543572d2e4f4cacb02ec7e4d151a" exitCode=0
Oct 02 11:12:39 crc kubenswrapper[4783]: I1002 11:12:39.568281 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"8c174112-c7e3-43b2-b794-a085b9565b90","Type":"ContainerDied","Data":"651f62e8a2e598e5c700f4817aa201515dc4543572d2e4f4cacb02ec7e4d151a"}
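The pod_startup_latency_tracker lines report two numbers: podStartE2EDuration is the observed running time minus podCreationTimestamp, while podStartSLOduration additionally subtracts the image-pull window (lastFinishedPulling minus firstStartedPulling), which is why pods that never pulled (zero-value pull timestamps) show the two values identical. A quick check against the openstack/ovsdbserver-sb-0 line above, with timestamps copied from the log; agreement is within wall-vs-monotonic clock rounding:

    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	// Reproduce the two durations logged for openstack/ovsdbserver-sb-0.
    	parse := func(s string) time.Time {
    		t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", s)
    		if err != nil {
    			panic(err)
    		}
    		return t
    	}
    	created := parse("2025-10-02 11:10:57 +0000 UTC")
    	firstPull := parse("2025-10-02 11:11:56.642955254 +0000 UTC")
    	lastPull := parse("2025-10-02 11:12:09.604961071 +0000 UTC")
    	running := parse("2025-10-02 11:12:38.646367353 +0000 UTC") // watchObservedRunningTime

    	e2e := running.Sub(created)          // 1m41.646367353s, the logged podStartE2EDuration
    	slo := e2e - lastPull.Sub(firstPull) // ~88.684s, the logged podStartSLOduration
    	fmt.Println(e2e, slo)
    }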
Oct 02 11:12:39 crc kubenswrapper[4783]: I1002 11:12:39.782030 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/5c831884-5efd-4048-b7df-d0edf1d51e89-etc-swift\") pod \"swift-storage-0\" (UID: \"5c831884-5efd-4048-b7df-d0edf1d51e89\") " pod="openstack/swift-storage-0"
Oct 02 11:12:39 crc kubenswrapper[4783]: E1002 11:12:39.782245 4783 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Oct 02 11:12:39 crc kubenswrapper[4783]: E1002 11:12:39.782261 4783 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Oct 02 11:12:39 crc kubenswrapper[4783]: E1002 11:12:39.782301 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/5c831884-5efd-4048-b7df-d0edf1d51e89-etc-swift podName:5c831884-5efd-4048-b7df-d0edf1d51e89 nodeName:}" failed. No retries permitted until 2025-10-02 11:12:55.782286594 +0000 UTC m=+1209.098480855 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/5c831884-5efd-4048-b7df-d0edf1d51e89-etc-swift") pod "swift-storage-0" (UID: "5c831884-5efd-4048-b7df-d0edf1d51e89") : configmap "swift-ring-files" not found
Oct 02 11:12:40 crc kubenswrapper[4783]: I1002 11:12:40.577213 4783 generic.go:334] "Generic (PLEG): container finished" podID="b969c99b-7cd2-413c-b9ea-4b0fc855fb66" containerID="cc4ec896ac609945ed78eacca2e8864dfcd26f8ed7cedf4f89f125dfde09fad3" exitCode=0
Oct 02 11:12:40 crc kubenswrapper[4783]: I1002 11:12:40.577315 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"b969c99b-7cd2-413c-b9ea-4b0fc855fb66","Type":"ContainerDied","Data":"cc4ec896ac609945ed78eacca2e8864dfcd26f8ed7cedf4f89f125dfde09fad3"}
Oct 02 11:12:41 crc kubenswrapper[4783]: I1002 11:12:41.406007 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-86db49b7ff-r7hrq"
Oct 02 11:12:41 crc kubenswrapper[4783]: I1002 11:12:41.588149 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"8c174112-c7e3-43b2-b794-a085b9565b90","Type":"ContainerStarted","Data":"b2478a5d092c5fcc95e06469562f6f8d6ed5f756051ac6e29ddd5b892a202173"}
Oct 02 11:12:41 crc kubenswrapper[4783]: I1002 11:12:41.592515 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"b969c99b-7cd2-413c-b9ea-4b0fc855fb66","Type":"ContainerStarted","Data":"73f0c3c72f26476622ca1bdf242d899bc876ba0cf1837e344aaf0fa92dd2c1b9"}
Oct 02 11:12:41 crc kubenswrapper[4783]: I1002 11:12:41.594885 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"627209ce-c546-42e5-b35e-ab8abd950ef8","Type":"ContainerStarted","Data":"cbeed8be45b7562b3185b65dcc5890c688835fc1e556d24f002c47f0452b8e65"}
Oct 02 11:12:41 crc kubenswrapper[4783]: I1002 11:12:41.621058 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=34.263421611 podStartE2EDuration="1m53.621037927s" podCreationTimestamp="2025-10-02 11:10:48 +0000 UTC" firstStartedPulling="2025-10-02 11:10:50.108174126 +0000 UTC m=+1083.424368377" lastFinishedPulling="2025-10-02 11:12:09.465790412 +0000 UTC m=+1162.781984693" observedRunningTime="2025-10-02 11:12:41.612966418 +0000 UTC m=+1194.929160679" watchObservedRunningTime="2025-10-02 11:12:41.621037927 +0000 UTC m=+1194.937232208"
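The nestedpendingoperations line schedules the retry: failed mount operations back off exponentially per volume, which is where "durationBeforeRetry 16s" comes from while the swift-ring-files ConfigMap does not exist yet. A sketch of that doubling backoff; the 2s base and ~2m cap here are my assumptions about the upstream defaults, not values taken from this log:

    package main

    import (
    	"fmt"
    	"time"
    )

    // Minimal sketch of the per-operation exponential backoff behind the
    // "No retries permitted until ... (durationBeforeRetry 16s)" message above.
    func main() {
    	d := 2 * time.Second // assumed initial backoff
    	maxBackoff := 2*time.Minute + 10*time.Second // assumed cap
    	for i := 1; i <= 8; i++ {
    		fmt.Printf("failure %d -> retry in %v\n", i, d)
    		d *= 2
    		if d > maxBackoff {
    			d = maxBackoff
    		}
    	}
    	// With these constants the fourth consecutive failure yields the 16s
    	// durationBeforeRetry seen in the log line above.
    }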
Oct 02 11:12:41 crc kubenswrapper[4783]: I1002 11:12:41.640584 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=91.693584278 podStartE2EDuration="1m44.640568577s" podCreationTimestamp="2025-10-02 11:10:57 +0000 UTC" firstStartedPulling="2025-10-02 11:11:56.625713816 +0000 UTC m=+1149.941908077" lastFinishedPulling="2025-10-02 11:12:09.572698105 +0000 UTC m=+1162.888892376" observedRunningTime="2025-10-02 11:12:41.637170585 +0000 UTC m=+1194.953364836" watchObservedRunningTime="2025-10-02 11:12:41.640568577 +0000 UTC m=+1194.956762838"
Oct 02 11:12:41 crc kubenswrapper[4783]: I1002 11:12:41.773744 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"]
Oct 02 11:12:41 crc kubenswrapper[4783]: E1002 11:12:41.774468 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="897fe4b3-a755-4281-8f7d-c64d3551f950" containerName="init"
Oct 02 11:12:41 crc kubenswrapper[4783]: I1002 11:12:41.774491 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="897fe4b3-a755-4281-8f7d-c64d3551f950" containerName="init"
Oct 02 11:12:41 crc kubenswrapper[4783]: E1002 11:12:41.774503 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ed908e0-5c5b-4246-9192-17d4b995650f" containerName="init"
Oct 02 11:12:41 crc kubenswrapper[4783]: I1002 11:12:41.774511 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ed908e0-5c5b-4246-9192-17d4b995650f" containerName="init"
Oct 02 11:12:41 crc kubenswrapper[4783]: E1002 11:12:41.774533 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8ed908e0-5c5b-4246-9192-17d4b995650f" containerName="dnsmasq-dns"
Oct 02 11:12:41 crc kubenswrapper[4783]: I1002 11:12:41.774542 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="8ed908e0-5c5b-4246-9192-17d4b995650f" containerName="dnsmasq-dns"
Oct 02 11:12:41 crc kubenswrapper[4783]: E1002 11:12:41.774552 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7d318b7e-ab49-4930-b4d0-49a84ea8a71a" containerName="init"
Oct 02 11:12:41 crc kubenswrapper[4783]: I1002 11:12:41.774560 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="7d318b7e-ab49-4930-b4d0-49a84ea8a71a" containerName="init"
Oct 02 11:12:41 crc kubenswrapper[4783]: E1002 11:12:41.774576 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="897fe4b3-a755-4281-8f7d-c64d3551f950" containerName="dnsmasq-dns"
Oct 02 11:12:41 crc kubenswrapper[4783]: I1002 11:12:41.774584 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="897fe4b3-a755-4281-8f7d-c64d3551f950" containerName="dnsmasq-dns"
Oct 02 11:12:41 crc kubenswrapper[4783]: I1002 11:12:41.774778 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="8ed908e0-5c5b-4246-9192-17d4b995650f" containerName="dnsmasq-dns"
Oct 02 11:12:41 crc kubenswrapper[4783]: I1002 11:12:41.774793 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="7d318b7e-ab49-4930-b4d0-49a84ea8a71a" containerName="init"
Oct 02 11:12:41 crc kubenswrapper[4783]: I1002 11:12:41.774810 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="897fe4b3-a755-4281-8f7d-c64d3551f950" containerName="dnsmasq-dns"
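The paired cpu_manager/state_mem lines above are housekeeping: when a new pod is admitted, any CPU-set or memory assignment still recorded for containers of pods that no longer exist is stale and gets pruned first. An illustrative sketch with my own types, not the kubelet's internal state API:

    package main

    import "fmt"

    // Prune stale per-container CPU assignments, keyed by pod UID and
    // container name, mirroring the "RemoveStaleState: removing container" /
    // "Deleted CPUSet assignment" pairs in the log above.
    type key struct{ podUID, container string }

    func main() {
    	assignments := map[key]string{
    		{"897fe4b3-a755-4281-8f7d-c64d3551f950", "init"}:        "0-3",
    		{"897fe4b3-a755-4281-8f7d-c64d3551f950", "dnsmasq-dns"}: "0-3",
    		{"8ed908e0-5c5b-4246-9192-17d4b995650f", "init"}:        "0-3",
    	}
    	active := map[string]bool{} // none of these pod UIDs still exist in the API
    	for k := range assignments {
    		if !active[k.podUID] {
    			fmt.Printf("RemoveStaleState: removing container podUID=%q containerName=%q\n", k.podUID, k.container)
    			delete(assignments, k)
    		}
    	}
    }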
Oct 02 11:12:41 crc kubenswrapper[4783]: I1002 11:12:41.781918 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0"
Oct 02 11:12:41 crc kubenswrapper[4783]: I1002 11:12:41.785913 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts"
Oct 02 11:12:41 crc kubenswrapper[4783]: I1002 11:12:41.786138 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs"
Oct 02 11:12:41 crc kubenswrapper[4783]: I1002 11:12:41.786316 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config"
Oct 02 11:12:41 crc kubenswrapper[4783]: I1002 11:12:41.786535 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-zz86t"
Oct 02 11:12:41 crc kubenswrapper[4783]: I1002 11:12:41.793796 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"]
Oct 02 11:12:41 crc kubenswrapper[4783]: I1002 11:12:41.826546 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/58303536-7e70-4db0-a6ac-0bcf69fb7aa6-scripts\") pod \"ovn-northd-0\" (UID: \"58303536-7e70-4db0-a6ac-0bcf69fb7aa6\") " pod="openstack/ovn-northd-0"
Oct 02 11:12:41 crc kubenswrapper[4783]: I1002 11:12:41.826626 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/58303536-7e70-4db0-a6ac-0bcf69fb7aa6-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"58303536-7e70-4db0-a6ac-0bcf69fb7aa6\") " pod="openstack/ovn-northd-0"
Oct 02 11:12:41 crc kubenswrapper[4783]: I1002 11:12:41.826706 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/58303536-7e70-4db0-a6ac-0bcf69fb7aa6-config\") pod \"ovn-northd-0\" (UID: \"58303536-7e70-4db0-a6ac-0bcf69fb7aa6\") " pod="openstack/ovn-northd-0"
Oct 02 11:12:41 crc kubenswrapper[4783]: I1002 11:12:41.826753 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/58303536-7e70-4db0-a6ac-0bcf69fb7aa6-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"58303536-7e70-4db0-a6ac-0bcf69fb7aa6\") " pod="openstack/ovn-northd-0"
Oct 02 11:12:41 crc kubenswrapper[4783]: I1002 11:12:41.826798 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58303536-7e70-4db0-a6ac-0bcf69fb7aa6-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"58303536-7e70-4db0-a6ac-0bcf69fb7aa6\") " pod="openstack/ovn-northd-0"
Oct 02 11:12:41 crc kubenswrapper[4783]: I1002 11:12:41.826832 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/58303536-7e70-4db0-a6ac-0bcf69fb7aa6-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"58303536-7e70-4db0-a6ac-0bcf69fb7aa6\") " pod="openstack/ovn-northd-0"
Oct 02 11:12:41 crc kubenswrapper[4783]: I1002 11:12:41.826855 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tfzps\" (UniqueName: \"kubernetes.io/projected/58303536-7e70-4db0-a6ac-0bcf69fb7aa6-kube-api-access-tfzps\") pod \"ovn-northd-0\" (UID: \"58303536-7e70-4db0-a6ac-0bcf69fb7aa6\") " pod="openstack/ovn-northd-0"
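The volumes attached above for openstack/ovn-northd-0 mix four plugin kinds: two ConfigMaps, an EmptyDir, three Secrets, and a projected service-account token. Rebuilt with the upstream types below; the volume names and plugin kinds come from the log lines, while the backing ConfigMap/Secret object names are inferred from the reflector lines (ovnnorthd-scripts, ovnnorthd-config, cert-ovnnorthd-ovndbs) and should be treated as assumptions:

    package main

    import (
    	"fmt"

    	corev1 "k8s.io/api/core/v1"
    )

    func main() {
    	vols := []corev1.Volume{
    		{Name: "scripts", VolumeSource: corev1.VolumeSource{ConfigMap: &corev1.ConfigMapVolumeSource{
    			LocalObjectReference: corev1.LocalObjectReference{Name: "ovnnorthd-scripts"}}}}, // assumed source object
    		{Name: "config", VolumeSource: corev1.VolumeSource{ConfigMap: &corev1.ConfigMapVolumeSource{
    			LocalObjectReference: corev1.LocalObjectReference{Name: "ovnnorthd-config"}}}}, // assumed source object
    		{Name: "ovn-rundir", VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}}},
    		{Name: "metrics-certs-tls-certs", VolumeSource: corev1.VolumeSource{Secret: &corev1.SecretVolumeSource{SecretName: "metrics-certs-tls-certs"}}}, // assumed source object
    		{Name: "combined-ca-bundle", VolumeSource: corev1.VolumeSource{Secret: &corev1.SecretVolumeSource{SecretName: "combined-ca-bundle"}}}, // assumed source object
    		{Name: "ovn-northd-tls-certs", VolumeSource: corev1.VolumeSource{Secret: &corev1.SecretVolumeSource{SecretName: "cert-ovnnorthd-ovndbs"}}}, // assumed source object
    		// kube-api-access-tfzps is a projected service-account token volume,
    		// normally injected by the API server rather than written by hand.
    		{Name: "kube-api-access-tfzps", VolumeSource: corev1.VolumeSource{Projected: &corev1.ProjectedVolumeSource{}}},
    	}
    	fmt.Println(len(vols), "volumes")
    }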
Oct 02 11:12:41 crc kubenswrapper[4783]: I1002 11:12:41.928698 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/58303536-7e70-4db0-a6ac-0bcf69fb7aa6-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"58303536-7e70-4db0-a6ac-0bcf69fb7aa6\") " pod="openstack/ovn-northd-0"
Oct 02 11:12:41 crc kubenswrapper[4783]: I1002 11:12:41.928778 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/58303536-7e70-4db0-a6ac-0bcf69fb7aa6-config\") pod \"ovn-northd-0\" (UID: \"58303536-7e70-4db0-a6ac-0bcf69fb7aa6\") " pod="openstack/ovn-northd-0"
Oct 02 11:12:41 crc kubenswrapper[4783]: I1002 11:12:41.928812 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/58303536-7e70-4db0-a6ac-0bcf69fb7aa6-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"58303536-7e70-4db0-a6ac-0bcf69fb7aa6\") " pod="openstack/ovn-northd-0"
Oct 02 11:12:41 crc kubenswrapper[4783]: I1002 11:12:41.928843 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58303536-7e70-4db0-a6ac-0bcf69fb7aa6-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"58303536-7e70-4db0-a6ac-0bcf69fb7aa6\") " pod="openstack/ovn-northd-0"
Oct 02 11:12:41 crc kubenswrapper[4783]: I1002 11:12:41.928869 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/58303536-7e70-4db0-a6ac-0bcf69fb7aa6-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"58303536-7e70-4db0-a6ac-0bcf69fb7aa6\") " pod="openstack/ovn-northd-0"
Oct 02 11:12:41 crc kubenswrapper[4783]: I1002 11:12:41.928883 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tfzps\" (UniqueName: \"kubernetes.io/projected/58303536-7e70-4db0-a6ac-0bcf69fb7aa6-kube-api-access-tfzps\") pod \"ovn-northd-0\" (UID: \"58303536-7e70-4db0-a6ac-0bcf69fb7aa6\") " pod="openstack/ovn-northd-0"
Oct 02 11:12:41 crc kubenswrapper[4783]: I1002 11:12:41.928907 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/58303536-7e70-4db0-a6ac-0bcf69fb7aa6-scripts\") pod \"ovn-northd-0\" (UID: \"58303536-7e70-4db0-a6ac-0bcf69fb7aa6\") " pod="openstack/ovn-northd-0"
Oct 02 11:12:41 crc kubenswrapper[4783]: I1002 11:12:41.929714 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/58303536-7e70-4db0-a6ac-0bcf69fb7aa6-scripts\") pod \"ovn-northd-0\" (UID: \"58303536-7e70-4db0-a6ac-0bcf69fb7aa6\") " pod="openstack/ovn-northd-0"
Oct 02 11:12:41 crc kubenswrapper[4783]: I1002 11:12:41.929970 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/58303536-7e70-4db0-a6ac-0bcf69fb7aa6-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"58303536-7e70-4db0-a6ac-0bcf69fb7aa6\") " pod="openstack/ovn-northd-0"
Oct 02 11:12:41 crc kubenswrapper[4783]: I1002 11:12:41.930896 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/58303536-7e70-4db0-a6ac-0bcf69fb7aa6-config\") pod \"ovn-northd-0\" (UID: \"58303536-7e70-4db0-a6ac-0bcf69fb7aa6\") " pod="openstack/ovn-northd-0"
Oct 02 11:12:41 crc kubenswrapper[4783]: I1002 11:12:41.933651 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/58303536-7e70-4db0-a6ac-0bcf69fb7aa6-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"58303536-7e70-4db0-a6ac-0bcf69fb7aa6\") " pod="openstack/ovn-northd-0"
Oct 02 11:12:41 crc kubenswrapper[4783]: I1002 11:12:41.934186 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/58303536-7e70-4db0-a6ac-0bcf69fb7aa6-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"58303536-7e70-4db0-a6ac-0bcf69fb7aa6\") " pod="openstack/ovn-northd-0"
Oct 02 11:12:41 crc kubenswrapper[4783]: I1002 11:12:41.935648 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/58303536-7e70-4db0-a6ac-0bcf69fb7aa6-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"58303536-7e70-4db0-a6ac-0bcf69fb7aa6\") " pod="openstack/ovn-northd-0"
Oct 02 11:12:41 crc kubenswrapper[4783]: I1002 11:12:41.951281 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tfzps\" (UniqueName: \"kubernetes.io/projected/58303536-7e70-4db0-a6ac-0bcf69fb7aa6-kube-api-access-tfzps\") pod \"ovn-northd-0\" (UID: \"58303536-7e70-4db0-a6ac-0bcf69fb7aa6\") " pod="openstack/ovn-northd-0"
Oct 02 11:12:42 crc kubenswrapper[4783]: I1002 11:12:42.108545 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0"
Oct 02 11:12:42 crc kubenswrapper[4783]: I1002 11:12:42.557981 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-k7v6f" podUID="c9fa791a-e505-43ab-a361-897b21f24f89" containerName="ovn-controller" probeResult="failure" output=<
Oct 02 11:12:42 crc kubenswrapper[4783]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status
Oct 02 11:12:42 crc kubenswrapper[4783]: >
Oct 02 11:12:42 crc kubenswrapper[4783]: I1002 11:12:42.580326 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-p5v7m"
Oct 02 11:12:42 crc kubenswrapper[4783]: W1002 11:12:42.590304 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod58303536_7e70_4db0_a6ac_0bcf69fb7aa6.slice/crio-7d06d8dc0487da28777cffd3ec6e9956383c36c297c0d89409e64b2b8e979020 WatchSource:0}: Error finding container 7d06d8dc0487da28777cffd3ec6e9956383c36c297c0d89409e64b2b8e979020: Status 404 returned error can't find the container with id 7d06d8dc0487da28777cffd3ec6e9956383c36c297c0d89409e64b2b8e979020
Oct 02 11:12:42 crc kubenswrapper[4783]: I1002 11:12:42.599200 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"]
Oct 02 11:12:42 crc kubenswrapper[4783]: I1002 11:12:42.604395 4783 generic.go:334] "Generic (PLEG): container finished" podID="3b3b5c94-1a3b-4486-9247-724deab20d81" containerID="4ad86dd5b5b6bbeabc1223e6afe4135532d1280634ed21d294d8ba134487df58" exitCode=0
Oct 02 11:12:42 crc kubenswrapper[4783]: I1002 11:12:42.604601 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"3b3b5c94-1a3b-4486-9247-724deab20d81","Type":"ContainerDied","Data":"4ad86dd5b5b6bbeabc1223e6afe4135532d1280634ed21d294d8ba134487df58"}
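The multi-line "output=< ... >" block above is how klog renders the captured output of a failed exec readiness probe: the kubelet runs the probe command in the container and a non-zero exit marks the probe failed, with the command's stdout/stderr attached. A stand-in sketch of that behavior; the real ovn-controller probe script is not shown in this log:

    package main

    import (
    	"fmt"
    	"os/exec"
    )

    // Run an exec-style readiness check: non-zero exit means failure, and the
    // combined output is reported the way prober.go logs it above. The command
    // here is a stand-in for the container's actual probe script.
    func main() {
    	cmd := exec.Command("sh", "-c",
    		`echo "ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status"; exit 1`)
    	out, err := cmd.CombinedOutput()
    	if err != nil {
    		fmt.Printf("probeResult=failure output=<\n%s>\n", out)
    	}
    }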
Oct 02 11:12:42 crc kubenswrapper[4783]: I1002 11:12:42.611138 4783 generic.go:334] "Generic (PLEG): container finished" podID="3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c" containerID="aab927f4dae4ca46598a4e07540a8b726e4cb9bda96cba6d1674271516631011" exitCode=0
Oct 02 11:12:42 crc kubenswrapper[4783]: I1002 11:12:42.611215 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c","Type":"ContainerDied","Data":"aab927f4dae4ca46598a4e07540a8b726e4cb9bda96cba6d1674271516631011"}
Oct 02 11:12:42 crc kubenswrapper[4783]: I1002 11:12:42.622060 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"58303536-7e70-4db0-a6ac-0bcf69fb7aa6","Type":"ContainerStarted","Data":"7d06d8dc0487da28777cffd3ec6e9956383c36c297c0d89409e64b2b8e979020"}
Oct 02 11:12:42 crc kubenswrapper[4783]: I1002 11:12:42.625031 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-p5v7m"
Oct 02 11:12:42 crc kubenswrapper[4783]: I1002 11:12:42.627496 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-4b4vd" event={"ID":"0072de5e-507d-4efe-8f60-f48b9799fe72","Type":"ContainerStarted","Data":"ba273c9ad3c3fb0568c2b310900c180cc3c347fc6e3b674c54d39be1dfaea57a"}
Oct 02 11:12:42 crc kubenswrapper[4783]: I1002 11:12:42.668694 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=36.618821874 podStartE2EDuration="1m53.668675372s" podCreationTimestamp="2025-10-02 11:10:49 +0000 UTC" firstStartedPulling="2025-10-02 11:10:51.314100678 +0000 UTC m=+1084.630294939" lastFinishedPulling="2025-10-02 11:12:08.363954176 +0000 UTC m=+1161.680148437" observedRunningTime="2025-10-02 11:12:42.664152269 +0000 UTC m=+1195.980346530" watchObservedRunningTime="2025-10-02 11:12:42.668675372 +0000 UTC m=+1195.984869633"
Oct 02 11:12:42 crc kubenswrapper[4783]: I1002 11:12:42.749576 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-4b4vd" podStartSLOduration=9.540159439 podStartE2EDuration="15.749527717s" podCreationTimestamp="2025-10-02 11:12:27 +0000 UTC" firstStartedPulling="2025-10-02 11:12:35.652741884 +0000 UTC m=+1188.968936145" lastFinishedPulling="2025-10-02 11:12:41.862110152 +0000 UTC m=+1195.178304423" observedRunningTime="2025-10-02 11:12:42.683617507 +0000 UTC m=+1195.999811768" watchObservedRunningTime="2025-10-02 11:12:42.749527717 +0000 UTC m=+1196.065721978"
Oct 02 11:12:42 crc kubenswrapper[4783]: I1002 11:12:42.905317 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-k7v6f-config-f49gz"]
Oct 02 11:12:42 crc kubenswrapper[4783]: I1002 11:12:42.906281 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-k7v6f-config-f49gz"
Oct 02 11:12:42 crc kubenswrapper[4783]: I1002 11:12:42.911047 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts"
Oct 02 11:12:42 crc kubenswrapper[4783]: I1002 11:12:42.935991 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-k7v6f-config-f49gz"]
Oct 02 11:12:43 crc kubenswrapper[4783]: I1002 11:12:43.049577 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-698758b865-dj9rv"
Oct 02 11:12:43 crc kubenswrapper[4783]: I1002 11:12:43.074889 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/d27b96ed-a479-422f-89fb-df19c3c399d8-var-run-ovn\") pod \"ovn-controller-k7v6f-config-f49gz\" (UID: \"d27b96ed-a479-422f-89fb-df19c3c399d8\") " pod="openstack/ovn-controller-k7v6f-config-f49gz"
Oct 02 11:12:43 crc kubenswrapper[4783]: I1002 11:12:43.074969 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5tkpp\" (UniqueName: \"kubernetes.io/projected/d27b96ed-a479-422f-89fb-df19c3c399d8-kube-api-access-5tkpp\") pod \"ovn-controller-k7v6f-config-f49gz\" (UID: \"d27b96ed-a479-422f-89fb-df19c3c399d8\") " pod="openstack/ovn-controller-k7v6f-config-f49gz"
Oct 02 11:12:43 crc kubenswrapper[4783]: I1002 11:12:43.075007 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/d27b96ed-a479-422f-89fb-df19c3c399d8-additional-scripts\") pod \"ovn-controller-k7v6f-config-f49gz\" (UID: \"d27b96ed-a479-422f-89fb-df19c3c399d8\") " pod="openstack/ovn-controller-k7v6f-config-f49gz"
Oct 02 11:12:43 crc kubenswrapper[4783]: I1002 11:12:43.075039 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/d27b96ed-a479-422f-89fb-df19c3c399d8-var-run\") pod \"ovn-controller-k7v6f-config-f49gz\" (UID: \"d27b96ed-a479-422f-89fb-df19c3c399d8\") " pod="openstack/ovn-controller-k7v6f-config-f49gz"
Oct 02 11:12:43 crc kubenswrapper[4783]: I1002 11:12:43.075133 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d27b96ed-a479-422f-89fb-df19c3c399d8-scripts\") pod \"ovn-controller-k7v6f-config-f49gz\" (UID: \"d27b96ed-a479-422f-89fb-df19c3c399d8\") " pod="openstack/ovn-controller-k7v6f-config-f49gz"
Oct 02 11:12:43 crc kubenswrapper[4783]: I1002 11:12:43.075160 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/d27b96ed-a479-422f-89fb-df19c3c399d8-var-log-ovn\") pod \"ovn-controller-k7v6f-config-f49gz\" (UID: \"d27b96ed-a479-422f-89fb-df19c3c399d8\") " pod="openstack/ovn-controller-k7v6f-config-f49gz"
Oct 02 11:12:43 crc kubenswrapper[4783]: I1002 11:12:43.158594 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-r7hrq"]
Oct 02 11:12:43 crc kubenswrapper[4783]: I1002 11:12:43.159039 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-86db49b7ff-r7hrq" podUID="eaf0e1d9-d343-4049-9725-a689da14aaa8" containerName="dnsmasq-dns" containerID="cri-o://87f28d796b95cfd9aedc9faf640a37c5712f10ce99d9d329ef4dca182ca3da26" gracePeriod=10
Oct 02 11:12:43 crc kubenswrapper[4783]: I1002 11:12:43.164650 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-86db49b7ff-r7hrq"
Oct 02 11:12:43 crc kubenswrapper[4783]: I1002 11:12:43.177586 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/d27b96ed-a479-422f-89fb-df19c3c399d8-var-run\") pod \"ovn-controller-k7v6f-config-f49gz\" (UID: \"d27b96ed-a479-422f-89fb-df19c3c399d8\") " pod="openstack/ovn-controller-k7v6f-config-f49gz"
Oct 02 11:12:43 crc kubenswrapper[4783]: I1002 11:12:43.177746 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d27b96ed-a479-422f-89fb-df19c3c399d8-scripts\") pod \"ovn-controller-k7v6f-config-f49gz\" (UID: \"d27b96ed-a479-422f-89fb-df19c3c399d8\") " pod="openstack/ovn-controller-k7v6f-config-f49gz"
Oct 02 11:12:43 crc kubenswrapper[4783]: I1002 11:12:43.177774 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/d27b96ed-a479-422f-89fb-df19c3c399d8-var-log-ovn\") pod \"ovn-controller-k7v6f-config-f49gz\" (UID: \"d27b96ed-a479-422f-89fb-df19c3c399d8\") " pod="openstack/ovn-controller-k7v6f-config-f49gz"
Oct 02 11:12:43 crc kubenswrapper[4783]: I1002 11:12:43.177861 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/d27b96ed-a479-422f-89fb-df19c3c399d8-var-run-ovn\") pod \"ovn-controller-k7v6f-config-f49gz\" (UID: \"d27b96ed-a479-422f-89fb-df19c3c399d8\") " pod="openstack/ovn-controller-k7v6f-config-f49gz"
Oct 02 11:12:43 crc kubenswrapper[4783]: I1002 11:12:43.177925 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5tkpp\" (UniqueName: \"kubernetes.io/projected/d27b96ed-a479-422f-89fb-df19c3c399d8-kube-api-access-5tkpp\") pod \"ovn-controller-k7v6f-config-f49gz\" (UID: \"d27b96ed-a479-422f-89fb-df19c3c399d8\") " pod="openstack/ovn-controller-k7v6f-config-f49gz"
Oct 02 11:12:43 crc kubenswrapper[4783]: I1002 11:12:43.177981 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/d27b96ed-a479-422f-89fb-df19c3c399d8-additional-scripts\") pod \"ovn-controller-k7v6f-config-f49gz\" (UID: \"d27b96ed-a479-422f-89fb-df19c3c399d8\") " pod="openstack/ovn-controller-k7v6f-config-f49gz"
Oct 02 11:12:43 crc kubenswrapper[4783]: I1002 11:12:43.179033 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/d27b96ed-a479-422f-89fb-df19c3c399d8-additional-scripts\") pod \"ovn-controller-k7v6f-config-f49gz\" (UID: \"d27b96ed-a479-422f-89fb-df19c3c399d8\") " pod="openstack/ovn-controller-k7v6f-config-f49gz"
Oct 02 11:12:43 crc kubenswrapper[4783]: I1002 11:12:43.179304 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/d27b96ed-a479-422f-89fb-df19c3c399d8-var-run\") pod \"ovn-controller-k7v6f-config-f49gz\" (UID: \"d27b96ed-a479-422f-89fb-df19c3c399d8\") " pod="openstack/ovn-controller-k7v6f-config-f49gz"
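The "Killing container with a grace period ... gracePeriod=10" line above is the standard two-phase stop: the runtime sends SIGTERM, waits up to the grace period for the container to exit, then falls back to SIGKILL. A sketch of that flow on a Unix host, with a plain child process standing in for the CRI-O container:

    package main

    import (
    	"fmt"
    	"os/exec"
    	"syscall"
    	"time"
    )

    func main() {
    	cmd := exec.Command("sleep", "60") // stand-in for the container process
    	if err := cmd.Start(); err != nil {
    		panic(err)
    	}
    	done := make(chan error, 1)
    	go func() { done <- cmd.Wait() }()

    	// Phase 1: polite termination request.
    	cmd.Process.Signal(syscall.SIGTERM)
    	select {
    	case <-done:
    		fmt.Println("exited within grace period")
    	case <-time.After(10 * time.Second): // gracePeriod=10 from the log
    		// Phase 2: hard kill once the grace period expires.
    		cmd.Process.Kill()
    		<-done
    		fmt.Println("force-killed after grace period")
    	}
    }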
Oct 02 11:12:43 crc kubenswrapper[4783]: I1002 11:12:43.181105 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/d27b96ed-a479-422f-89fb-df19c3c399d8-var-run-ovn\") pod \"ovn-controller-k7v6f-config-f49gz\" (UID: \"d27b96ed-a479-422f-89fb-df19c3c399d8\") " pod="openstack/ovn-controller-k7v6f-config-f49gz"
Oct 02 11:12:43 crc kubenswrapper[4783]: I1002 11:12:43.181450 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/d27b96ed-a479-422f-89fb-df19c3c399d8-var-log-ovn\") pod \"ovn-controller-k7v6f-config-f49gz\" (UID: \"d27b96ed-a479-422f-89fb-df19c3c399d8\") " pod="openstack/ovn-controller-k7v6f-config-f49gz"
Oct 02 11:12:43 crc kubenswrapper[4783]: I1002 11:12:43.183141 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d27b96ed-a479-422f-89fb-df19c3c399d8-scripts\") pod \"ovn-controller-k7v6f-config-f49gz\" (UID: \"d27b96ed-a479-422f-89fb-df19c3c399d8\") " pod="openstack/ovn-controller-k7v6f-config-f49gz"
Oct 02 11:12:43 crc kubenswrapper[4783]: I1002 11:12:43.208094 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5tkpp\" (UniqueName: \"kubernetes.io/projected/d27b96ed-a479-422f-89fb-df19c3c399d8-kube-api-access-5tkpp\") pod \"ovn-controller-k7v6f-config-f49gz\" (UID: \"d27b96ed-a479-422f-89fb-df19c3c399d8\") " pod="openstack/ovn-controller-k7v6f-config-f49gz"
Oct 02 11:12:43 crc kubenswrapper[4783]: I1002 11:12:43.221772 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-k7v6f-config-f49gz"
Oct 02 11:12:43 crc kubenswrapper[4783]: I1002 11:12:43.645279 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"3b3b5c94-1a3b-4486-9247-724deab20d81","Type":"ContainerStarted","Data":"d7e202d124dcc4b1c7dd1a3016a61b09bb5458dcdfdcabaade45a085cb64a32c"}
Oct 02 11:12:43 crc kubenswrapper[4783]: I1002 11:12:43.646460 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0"
Oct 02 11:12:43 crc kubenswrapper[4783]: I1002 11:12:43.652290 4783 generic.go:334] "Generic (PLEG): container finished" podID="eaf0e1d9-d343-4049-9725-a689da14aaa8" containerID="87f28d796b95cfd9aedc9faf640a37c5712f10ce99d9d329ef4dca182ca3da26" exitCode=0
Oct 02 11:12:43 crc kubenswrapper[4783]: I1002 11:12:43.652396 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-r7hrq" event={"ID":"eaf0e1d9-d343-4049-9725-a689da14aaa8","Type":"ContainerDied","Data":"87f28d796b95cfd9aedc9faf640a37c5712f10ce99d9d329ef4dca182ca3da26"}
Oct 02 11:12:43 crc kubenswrapper[4783]: I1002 11:12:43.658342 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c","Type":"ContainerStarted","Data":"6c7e174a6041901be5e2d41be77720ba19404622023eeedf0f9f60bf7700606a"}
Oct 02 11:12:43 crc kubenswrapper[4783]: I1002 11:12:43.658922 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0"
Oct 02 11:12:43 crc kubenswrapper[4783]: I1002 11:12:43.725020 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=48.922316972 podStartE2EDuration="1m57.725002801s" podCreationTimestamp="2025-10-02 11:10:46 +0000 UTC" firstStartedPulling="2025-10-02 11:10:48.458872329 +0000 UTC m=+1081.775066590" lastFinishedPulling="2025-10-02 11:11:57.261558118 +0000 UTC m=+1150.577752419" observedRunningTime="2025-10-02 11:12:43.690673689 +0000 UTC m=+1197.006867970" watchObservedRunningTime="2025-10-02 11:12:43.725002801 +0000 UTC m=+1197.041197062"
Oct 02 11:12:43 crc kubenswrapper[4783]: I1002 11:12:43.732427 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=49.754372886 podStartE2EDuration="1m57.732396402s" podCreationTimestamp="2025-10-02 11:10:46 +0000 UTC" firstStartedPulling="2025-10-02 11:10:48.636342992 +0000 UTC m=+1081.952537253" lastFinishedPulling="2025-10-02 11:11:56.614366508 +0000 UTC m=+1149.930560769" observedRunningTime="2025-10-02 11:12:43.723651544 +0000 UTC m=+1197.039845805" watchObservedRunningTime="2025-10-02 11:12:43.732396402 +0000 UTC m=+1197.048590653"
Oct 02 11:12:44 crc kubenswrapper[4783]: I1002 11:12:44.107918 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-r7hrq"
Oct 02 11:12:44 crc kubenswrapper[4783]: I1002 11:12:44.191855 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-k7v6f-config-f49gz"]
Oct 02 11:12:44 crc kubenswrapper[4783]: I1002 11:12:44.196283 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/eaf0e1d9-d343-4049-9725-a689da14aaa8-dns-svc\") pod \"eaf0e1d9-d343-4049-9725-a689da14aaa8\" (UID: \"eaf0e1d9-d343-4049-9725-a689da14aaa8\") "
Oct 02 11:12:44 crc kubenswrapper[4783]: I1002 11:12:44.196424 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nx2pm\" (UniqueName: \"kubernetes.io/projected/eaf0e1d9-d343-4049-9725-a689da14aaa8-kube-api-access-nx2pm\") pod \"eaf0e1d9-d343-4049-9725-a689da14aaa8\" (UID: \"eaf0e1d9-d343-4049-9725-a689da14aaa8\") "
Oct 02 11:12:44 crc kubenswrapper[4783]: I1002 11:12:44.196624 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eaf0e1d9-d343-4049-9725-a689da14aaa8-config\") pod \"eaf0e1d9-d343-4049-9725-a689da14aaa8\" (UID: \"eaf0e1d9-d343-4049-9725-a689da14aaa8\") "
Oct 02 11:12:44 crc kubenswrapper[4783]: I1002 11:12:44.196702 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/eaf0e1d9-d343-4049-9725-a689da14aaa8-ovsdbserver-nb\") pod \"eaf0e1d9-d343-4049-9725-a689da14aaa8\" (UID: \"eaf0e1d9-d343-4049-9725-a689da14aaa8\") "
Oct 02 11:12:44 crc kubenswrapper[4783]: I1002 11:12:44.207297 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/eaf0e1d9-d343-4049-9725-a689da14aaa8-ovsdbserver-sb\") pod \"eaf0e1d9-d343-4049-9725-a689da14aaa8\" (UID: \"eaf0e1d9-d343-4049-9725-a689da14aaa8\") "
Oct 02 11:12:44 crc kubenswrapper[4783]: I1002 11:12:44.220648 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eaf0e1d9-d343-4049-9725-a689da14aaa8-kube-api-access-nx2pm" (OuterVolumeSpecName: "kube-api-access-nx2pm") pod "eaf0e1d9-d343-4049-9725-a689da14aaa8" (UID: "eaf0e1d9-d343-4049-9725-a689da14aaa8"). InnerVolumeSpecName "kube-api-access-nx2pm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 02 11:12:44 crc kubenswrapper[4783]: I1002 11:12:44.250641 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eaf0e1d9-d343-4049-9725-a689da14aaa8-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "eaf0e1d9-d343-4049-9725-a689da14aaa8" (UID: "eaf0e1d9-d343-4049-9725-a689da14aaa8"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 02 11:12:44 crc kubenswrapper[4783]: I1002 11:12:44.251093 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eaf0e1d9-d343-4049-9725-a689da14aaa8-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "eaf0e1d9-d343-4049-9725-a689da14aaa8" (UID: "eaf0e1d9-d343-4049-9725-a689da14aaa8"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 02 11:12:44 crc kubenswrapper[4783]: I1002 11:12:44.266627 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eaf0e1d9-d343-4049-9725-a689da14aaa8-config" (OuterVolumeSpecName: "config") pod "eaf0e1d9-d343-4049-9725-a689da14aaa8" (UID: "eaf0e1d9-d343-4049-9725-a689da14aaa8"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 02 11:12:44 crc kubenswrapper[4783]: I1002 11:12:44.270789 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/eaf0e1d9-d343-4049-9725-a689da14aaa8-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "eaf0e1d9-d343-4049-9725-a689da14aaa8" (UID: "eaf0e1d9-d343-4049-9725-a689da14aaa8"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 02 11:12:44 crc kubenswrapper[4783]: I1002 11:12:44.311660 4783 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/eaf0e1d9-d343-4049-9725-a689da14aaa8-dns-svc\") on node \"crc\" DevicePath \"\""
Oct 02 11:12:44 crc kubenswrapper[4783]: I1002 11:12:44.311933 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nx2pm\" (UniqueName: \"kubernetes.io/projected/eaf0e1d9-d343-4049-9725-a689da14aaa8-kube-api-access-nx2pm\") on node \"crc\" DevicePath \"\""
Oct 02 11:12:44 crc kubenswrapper[4783]: I1002 11:12:44.312027 4783 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/eaf0e1d9-d343-4049-9725-a689da14aaa8-config\") on node \"crc\" DevicePath \"\""
Oct 02 11:12:44 crc kubenswrapper[4783]: I1002 11:12:44.312140 4783 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/eaf0e1d9-d343-4049-9725-a689da14aaa8-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Oct 02 11:12:44 crc kubenswrapper[4783]: I1002 11:12:44.312226 4783 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/eaf0e1d9-d343-4049-9725-a689da14aaa8-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Oct 02 11:12:44 crc kubenswrapper[4783]: I1002 11:12:44.669300 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-k7v6f-config-f49gz" event={"ID":"d27b96ed-a479-422f-89fb-df19c3c399d8","Type":"ContainerStarted","Data":"52c7dceecfebc4093b015748e4aae44f3ca5cd3b77548bffb30eb3a0313d28ce"}
Oct 02 11:12:44 crc kubenswrapper[4783]: I1002 11:12:44.671850 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-86db49b7ff-r7hrq"
Oct 02 11:12:44 crc kubenswrapper[4783]: I1002 11:12:44.674459 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-86db49b7ff-r7hrq" event={"ID":"eaf0e1d9-d343-4049-9725-a689da14aaa8","Type":"ContainerDied","Data":"35a0cc2b15a3dc13f6d1ac16734ac6282690f70271446db15142eed8ac45536a"}
Oct 02 11:12:44 crc kubenswrapper[4783]: I1002 11:12:44.674493 4783 scope.go:117] "RemoveContainer" containerID="87f28d796b95cfd9aedc9faf640a37c5712f10ce99d9d329ef4dca182ca3da26"
Oct 02 11:12:44 crc kubenswrapper[4783]: I1002 11:12:44.699296 4783 scope.go:117] "RemoveContainer" containerID="4fa6af69157e015ff5cca42eab1f18f90d08255451ca2c12b0c5b41ed4a18792"
Oct 02 11:12:44 crc kubenswrapper[4783]: I1002 11:12:44.714161 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-r7hrq"]
Oct 02 11:12:44 crc kubenswrapper[4783]: I1002 11:12:44.727997 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-86db49b7ff-r7hrq"]
Oct 02 11:12:45 crc kubenswrapper[4783]: I1002 11:12:45.559012 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="eaf0e1d9-d343-4049-9725-a689da14aaa8" path="/var/lib/kubelet/pods/eaf0e1d9-d343-4049-9725-a689da14aaa8/volumes"
Oct 02 11:12:45 crc kubenswrapper[4783]: I1002 11:12:45.680019 4783 generic.go:334] "Generic (PLEG): container finished" podID="d27b96ed-a479-422f-89fb-df19c3c399d8" containerID="bcfcefc8facb8e60fce5bfc07ac844bba734127445fd330b70ac307d45ead251" exitCode=0
Oct 02 11:12:45 crc kubenswrapper[4783]: I1002 11:12:45.680058 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-k7v6f-config-f49gz" event={"ID":"d27b96ed-a479-422f-89fb-df19c3c399d8","Type":"ContainerDied","Data":"bcfcefc8facb8e60fce5bfc07ac844bba734127445fd330b70ac307d45ead251"}
Oct 02 11:12:47 crc kubenswrapper[4783]: I1002 11:12:47.626866 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-k7v6f-config-f49gz"
Oct 02 11:12:47 crc kubenswrapper[4783]: I1002 11:12:47.643774 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-k7v6f"
Oct 02 11:12:47 crc kubenswrapper[4783]: I1002 11:12:47.699958 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-k7v6f-config-f49gz" event={"ID":"d27b96ed-a479-422f-89fb-df19c3c399d8","Type":"ContainerDied","Data":"52c7dceecfebc4093b015748e4aae44f3ca5cd3b77548bffb30eb3a0313d28ce"}
Oct 02 11:12:47 crc kubenswrapper[4783]: I1002 11:12:47.699996 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="52c7dceecfebc4093b015748e4aae44f3ca5cd3b77548bffb30eb3a0313d28ce"
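After "SyncLoop REMOVE", kubelet_volumes.go reports "Cleaned up orphaned pod volumes dir" once a deleted pod's volumes directory is empty and can be dropped, as seen for podUID eaf0e1d9-d343-4049-9725-a689da14aaa8 above. An illustrative sketch of that sweep (the active-pod set here is a stand-in, and the real check also verifies nothing is still mounted):

    package main

    import (
    	"fmt"
    	"os"
    	"path/filepath"
    )

    func main() {
    	root := "/var/lib/kubelet/pods" // kubelet's per-pod state root
    	active := map[string]bool{}     // UIDs of pods that still exist in the API
    	entries, err := os.ReadDir(root)
    	if err != nil {
    		fmt.Println("skipping:", err) // e.g. not running on a kubelet host
    		return
    	}
    	for _, e := range entries {
    		uid := e.Name()
    		if active[uid] {
    			continue
    		}
    		dir := filepath.Join(root, uid, "volumes")
    		if err := os.Remove(dir); err == nil { // Remove only succeeds when empty
    			fmt.Printf("Cleaned up orphaned pod volumes dir path=%q\n", dir)
    		}
    	}
    }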
Oct 02 11:12:47 crc kubenswrapper[4783]: I1002 11:12:47.700046 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-k7v6f-config-f49gz"
Oct 02 11:12:47 crc kubenswrapper[4783]: I1002 11:12:47.770100 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5tkpp\" (UniqueName: \"kubernetes.io/projected/d27b96ed-a479-422f-89fb-df19c3c399d8-kube-api-access-5tkpp\") pod \"d27b96ed-a479-422f-89fb-df19c3c399d8\" (UID: \"d27b96ed-a479-422f-89fb-df19c3c399d8\") "
Oct 02 11:12:47 crc kubenswrapper[4783]: I1002 11:12:47.770160 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d27b96ed-a479-422f-89fb-df19c3c399d8-scripts\") pod \"d27b96ed-a479-422f-89fb-df19c3c399d8\" (UID: \"d27b96ed-a479-422f-89fb-df19c3c399d8\") "
Oct 02 11:12:47 crc kubenswrapper[4783]: I1002 11:12:47.770249 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/d27b96ed-a479-422f-89fb-df19c3c399d8-var-log-ovn\") pod \"d27b96ed-a479-422f-89fb-df19c3c399d8\" (UID: \"d27b96ed-a479-422f-89fb-df19c3c399d8\") "
Oct 02 11:12:47 crc kubenswrapper[4783]: I1002 11:12:47.770327 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/d27b96ed-a479-422f-89fb-df19c3c399d8-var-run\") pod \"d27b96ed-a479-422f-89fb-df19c3c399d8\" (UID: \"d27b96ed-a479-422f-89fb-df19c3c399d8\") "
Oct 02 11:12:47 crc kubenswrapper[4783]: I1002 11:12:47.770383 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/d27b96ed-a479-422f-89fb-df19c3c399d8-additional-scripts\") pod \"d27b96ed-a479-422f-89fb-df19c3c399d8\" (UID: \"d27b96ed-a479-422f-89fb-df19c3c399d8\") "
Oct 02 11:12:47 crc kubenswrapper[4783]: I1002 11:12:47.770444 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/d27b96ed-a479-422f-89fb-df19c3c399d8-var-run-ovn\") pod \"d27b96ed-a479-422f-89fb-df19c3c399d8\" (UID: \"d27b96ed-a479-422f-89fb-df19c3c399d8\") "
Oct 02 11:12:47 crc kubenswrapper[4783]: I1002 11:12:47.771349 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d27b96ed-a479-422f-89fb-df19c3c399d8-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "d27b96ed-a479-422f-89fb-df19c3c399d8" (UID: "d27b96ed-a479-422f-89fb-df19c3c399d8"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Oct 02 11:12:47 crc kubenswrapper[4783]: I1002 11:12:47.771565 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d27b96ed-a479-422f-89fb-df19c3c399d8-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "d27b96ed-a479-422f-89fb-df19c3c399d8" (UID: "d27b96ed-a479-422f-89fb-df19c3c399d8"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 02 11:12:47 crc kubenswrapper[4783]: I1002 11:12:47.771814 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d27b96ed-a479-422f-89fb-df19c3c399d8-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "d27b96ed-a479-422f-89fb-df19c3c399d8" (UID: "d27b96ed-a479-422f-89fb-df19c3c399d8"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Oct 02 11:12:47 crc kubenswrapper[4783]: I1002 11:12:47.771904 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d27b96ed-a479-422f-89fb-df19c3c399d8-var-run" (OuterVolumeSpecName: "var-run") pod "d27b96ed-a479-422f-89fb-df19c3c399d8" (UID: "d27b96ed-a479-422f-89fb-df19c3c399d8"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Oct 02 11:12:47 crc kubenswrapper[4783]: I1002 11:12:47.772362 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d27b96ed-a479-422f-89fb-df19c3c399d8-scripts" (OuterVolumeSpecName: "scripts") pod "d27b96ed-a479-422f-89fb-df19c3c399d8" (UID: "d27b96ed-a479-422f-89fb-df19c3c399d8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 02 11:12:47 crc kubenswrapper[4783]: I1002 11:12:47.802228 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d27b96ed-a479-422f-89fb-df19c3c399d8-kube-api-access-5tkpp" (OuterVolumeSpecName: "kube-api-access-5tkpp") pod "d27b96ed-a479-422f-89fb-df19c3c399d8" (UID: "d27b96ed-a479-422f-89fb-df19c3c399d8"). InnerVolumeSpecName "kube-api-access-5tkpp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 02 11:12:47 crc kubenswrapper[4783]: I1002 11:12:47.871874 4783 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/d27b96ed-a479-422f-89fb-df19c3c399d8-var-run\") on node \"crc\" DevicePath \"\""
Oct 02 11:12:47 crc kubenswrapper[4783]: I1002 11:12:47.871900 4783 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/d27b96ed-a479-422f-89fb-df19c3c399d8-additional-scripts\") on node \"crc\" DevicePath \"\""
Oct 02 11:12:47 crc kubenswrapper[4783]: I1002 11:12:47.871909 4783 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/d27b96ed-a479-422f-89fb-df19c3c399d8-var-run-ovn\") on node \"crc\" DevicePath \"\""
Oct 02 11:12:47 crc kubenswrapper[4783]: I1002 11:12:47.871918 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5tkpp\" (UniqueName: \"kubernetes.io/projected/d27b96ed-a479-422f-89fb-df19c3c399d8-kube-api-access-5tkpp\") on node \"crc\" DevicePath \"\""
Oct 02 11:12:47 crc kubenswrapper[4783]: I1002 11:12:47.871927 4783 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/d27b96ed-a479-422f-89fb-df19c3c399d8-scripts\") on node \"crc\" DevicePath \"\""
Oct 02 11:12:47 crc kubenswrapper[4783]: I1002 11:12:47.871935 4783 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/d27b96ed-a479-422f-89fb-df19c3c399d8-var-log-ovn\") on node \"crc\" DevicePath \"\""
Oct 02 11:12:48 crc kubenswrapper[4783]: I1002 11:12:48.743013 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-k7v6f-config-f49gz"]
Oct 02 11:12:48 crc kubenswrapper[4783]: I1002 11:12:48.751720 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-k7v6f-config-f49gz"]
Oct 02 11:12:48 crc kubenswrapper[4783]: I1002 11:12:48.833749 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-k7v6f-config-6tlfn"]
Oct 02 11:12:48 crc kubenswrapper[4783]: E1002 11:12:48.834502 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eaf0e1d9-d343-4049-9725-a689da14aaa8" containerName="init"
Oct 02 11:12:48 crc kubenswrapper[4783]: I1002 11:12:48.834617 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="eaf0e1d9-d343-4049-9725-a689da14aaa8" containerName="init"
Oct 02 11:12:48 crc kubenswrapper[4783]: E1002 11:12:48.834677 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eaf0e1d9-d343-4049-9725-a689da14aaa8" containerName="dnsmasq-dns"
Oct 02 11:12:48 crc kubenswrapper[4783]: I1002 11:12:48.834736 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="eaf0e1d9-d343-4049-9725-a689da14aaa8" containerName="dnsmasq-dns"
Oct 02 11:12:48 crc kubenswrapper[4783]: E1002 11:12:48.834820 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d27b96ed-a479-422f-89fb-df19c3c399d8" containerName="ovn-config"
Oct 02 11:12:48 crc kubenswrapper[4783]: I1002 11:12:48.834893 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="d27b96ed-a479-422f-89fb-df19c3c399d8" containerName="ovn-config"
Oct 02 11:12:48 crc kubenswrapper[4783]: I1002 11:12:48.835158 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="eaf0e1d9-d343-4049-9725-a689da14aaa8" containerName="dnsmasq-dns"
Oct 02 11:12:48 crc kubenswrapper[4783]: I1002 11:12:48.835292 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="d27b96ed-a479-422f-89fb-df19c3c399d8" containerName="ovn-config"
Oct 02 11:12:48 crc kubenswrapper[4783]: I1002 11:12:48.836214 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-k7v6f-config-6tlfn"
Oct 02 11:12:48 crc kubenswrapper[4783]: I1002 11:12:48.840088 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts"
Oct 02 11:12:48 crc kubenswrapper[4783]: I1002 11:12:48.850759 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-k7v6f-config-6tlfn"]
Oct 02 11:12:48 crc kubenswrapper[4783]: I1002 11:12:48.986956 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/018f20a9-52dd-48ff-9a9a-17eb41e690e9-var-log-ovn\") pod \"ovn-controller-k7v6f-config-6tlfn\" (UID: \"018f20a9-52dd-48ff-9a9a-17eb41e690e9\") " pod="openstack/ovn-controller-k7v6f-config-6tlfn"
Oct 02 11:12:48 crc kubenswrapper[4783]: I1002 11:12:48.987041 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/018f20a9-52dd-48ff-9a9a-17eb41e690e9-scripts\") pod \"ovn-controller-k7v6f-config-6tlfn\" (UID: \"018f20a9-52dd-48ff-9a9a-17eb41e690e9\") " pod="openstack/ovn-controller-k7v6f-config-6tlfn"
Oct 02 11:12:48 crc kubenswrapper[4783]: I1002 11:12:48.987094 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/018f20a9-52dd-48ff-9a9a-17eb41e690e9-additional-scripts\") pod \"ovn-controller-k7v6f-config-6tlfn\" (UID: \"018f20a9-52dd-48ff-9a9a-17eb41e690e9\") " pod="openstack/ovn-controller-k7v6f-config-6tlfn"
Oct 02 11:12:48 crc kubenswrapper[4783]: I1002 11:12:48.987115 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/018f20a9-52dd-48ff-9a9a-17eb41e690e9-var-run\") pod \"ovn-controller-k7v6f-config-6tlfn\" (UID: \"018f20a9-52dd-48ff-9a9a-17eb41e690e9\") " pod="openstack/ovn-controller-k7v6f-config-6tlfn"
Oct 02 11:12:48 crc kubenswrapper[4783]: I1002 11:12:48.987190 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/018f20a9-52dd-48ff-9a9a-17eb41e690e9-var-run-ovn\") pod \"ovn-controller-k7v6f-config-6tlfn\" (UID: \"018f20a9-52dd-48ff-9a9a-17eb41e690e9\") " pod="openstack/ovn-controller-k7v6f-config-6tlfn"
Oct 02 11:12:48 crc kubenswrapper[4783]: I1002 11:12:48.987252 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qbztr\" (UniqueName: \"kubernetes.io/projected/018f20a9-52dd-48ff-9a9a-17eb41e690e9-kube-api-access-qbztr\") pod \"ovn-controller-k7v6f-config-6tlfn\" (UID: \"018f20a9-52dd-48ff-9a9a-17eb41e690e9\") " pod="openstack/ovn-controller-k7v6f-config-6tlfn"
Oct 02 11:12:49 crc kubenswrapper[4783]: I1002 11:12:49.088398 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/018f20a9-52dd-48ff-9a9a-17eb41e690e9-var-run-ovn\") pod \"ovn-controller-k7v6f-config-6tlfn\" (UID: \"018f20a9-52dd-48ff-9a9a-17eb41e690e9\") " pod="openstack/ovn-controller-k7v6f-config-6tlfn"
Oct 02 11:12:49 crc kubenswrapper[4783]: I1002 11:12:49.088667 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qbztr\" (UniqueName: \"kubernetes.io/projected/018f20a9-52dd-48ff-9a9a-17eb41e690e9-kube-api-access-qbztr\") pod \"ovn-controller-k7v6f-config-6tlfn\" (UID: \"018f20a9-52dd-48ff-9a9a-17eb41e690e9\") " pod="openstack/ovn-controller-k7v6f-config-6tlfn"
Oct 02 11:12:49 crc kubenswrapper[4783]: I1002 11:12:49.088727 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/018f20a9-52dd-48ff-9a9a-17eb41e690e9-var-run-ovn\") pod \"ovn-controller-k7v6f-config-6tlfn\" (UID: \"018f20a9-52dd-48ff-9a9a-17eb41e690e9\") " pod="openstack/ovn-controller-k7v6f-config-6tlfn"
Oct 02 11:12:49 crc kubenswrapper[4783]: I1002 11:12:49.088946 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/018f20a9-52dd-48ff-9a9a-17eb41e690e9-var-log-ovn\") pod \"ovn-controller-k7v6f-config-6tlfn\" (UID: \"018f20a9-52dd-48ff-9a9a-17eb41e690e9\") " pod="openstack/ovn-controller-k7v6f-config-6tlfn"
Oct 02 11:12:49 crc kubenswrapper[4783]: I1002 11:12:49.088968 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/018f20a9-52dd-48ff-9a9a-17eb41e690e9-var-log-ovn\") pod \"ovn-controller-k7v6f-config-6tlfn\" (UID: \"018f20a9-52dd-48ff-9a9a-17eb41e690e9\") " pod="openstack/ovn-controller-k7v6f-config-6tlfn"
Oct 02 11:12:49 crc kubenswrapper[4783]: I1002 11:12:49.089078 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/018f20a9-52dd-48ff-9a9a-17eb41e690e9-scripts\") pod \"ovn-controller-k7v6f-config-6tlfn\" (UID: \"018f20a9-52dd-48ff-9a9a-17eb41e690e9\") " pod="openstack/ovn-controller-k7v6f-config-6tlfn"
Oct 02 11:12:49 crc kubenswrapper[4783]: I1002 11:12:49.089171 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/018f20a9-52dd-48ff-9a9a-17eb41e690e9-additional-scripts\") pod \"ovn-controller-k7v6f-config-6tlfn\" (UID: \"018f20a9-52dd-48ff-9a9a-17eb41e690e9\") " pod="openstack/ovn-controller-k7v6f-config-6tlfn"
Oct 02 11:12:49 crc kubenswrapper[4783]: I1002 11:12:49.089201 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/018f20a9-52dd-48ff-9a9a-17eb41e690e9-var-run\") pod \"ovn-controller-k7v6f-config-6tlfn\" (UID: \"018f20a9-52dd-48ff-9a9a-17eb41e690e9\") " pod="openstack/ovn-controller-k7v6f-config-6tlfn"
Oct 02 11:12:49 crc kubenswrapper[4783]: I1002 11:12:49.089308 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/018f20a9-52dd-48ff-9a9a-17eb41e690e9-var-run\") pod \"ovn-controller-k7v6f-config-6tlfn\" (UID: \"018f20a9-52dd-48ff-9a9a-17eb41e690e9\") " pod="openstack/ovn-controller-k7v6f-config-6tlfn"
Oct 02 11:12:49 crc kubenswrapper[4783]: I1002 11:12:49.089952 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/018f20a9-52dd-48ff-9a9a-17eb41e690e9-additional-scripts\") pod \"ovn-controller-k7v6f-config-6tlfn\" (UID: \"018f20a9-52dd-48ff-9a9a-17eb41e690e9\") " pod="openstack/ovn-controller-k7v6f-config-6tlfn"
Oct 02 11:12:49 crc kubenswrapper[4783]: I1002 11:12:49.091510 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/018f20a9-52dd-48ff-9a9a-17eb41e690e9-scripts\") pod \"ovn-controller-k7v6f-config-6tlfn\" (UID: \"018f20a9-52dd-48ff-9a9a-17eb41e690e9\") " pod="openstack/ovn-controller-k7v6f-config-6tlfn"
Oct 02 11:12:49 crc kubenswrapper[4783]: I1002 11:12:49.106933 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qbztr\" (UniqueName: \"kubernetes.io/projected/018f20a9-52dd-48ff-9a9a-17eb41e690e9-kube-api-access-qbztr\") pod \"ovn-controller-k7v6f-config-6tlfn\" (UID: \"018f20a9-52dd-48ff-9a9a-17eb41e690e9\") " pod="openstack/ovn-controller-k7v6f-config-6tlfn"
Oct 02 11:12:49 crc kubenswrapper[4783]: I1002 11:12:49.157186 4783 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack/ovn-controller-k7v6f-config-6tlfn" Oct 02 11:12:49 crc kubenswrapper[4783]: I1002 11:12:49.467790 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-k7v6f-config-6tlfn"] Oct 02 11:12:49 crc kubenswrapper[4783]: W1002 11:12:49.491395 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod018f20a9_52dd_48ff_9a9a_17eb41e690e9.slice/crio-85dc73e2cc8d6e698c91fa23747f73f03547a448d64e5f3f0289c2e7d22a0327 WatchSource:0}: Error finding container 85dc73e2cc8d6e698c91fa23747f73f03547a448d64e5f3f0289c2e7d22a0327: Status 404 returned error can't find the container with id 85dc73e2cc8d6e698c91fa23747f73f03547a448d64e5f3f0289c2e7d22a0327 Oct 02 11:12:49 crc kubenswrapper[4783]: I1002 11:12:49.568077 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d27b96ed-a479-422f-89fb-df19c3c399d8" path="/var/lib/kubelet/pods/d27b96ed-a479-422f-89fb-df19c3c399d8/volumes" Oct 02 11:12:49 crc kubenswrapper[4783]: I1002 11:12:49.568853 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0" Oct 02 11:12:49 crc kubenswrapper[4783]: I1002 11:12:49.568882 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0" Oct 02 11:12:49 crc kubenswrapper[4783]: I1002 11:12:49.717741 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-k7v6f-config-6tlfn" event={"ID":"018f20a9-52dd-48ff-9a9a-17eb41e690e9","Type":"ContainerStarted","Data":"85dc73e2cc8d6e698c91fa23747f73f03547a448d64e5f3f0289c2e7d22a0327"} Oct 02 11:12:50 crc kubenswrapper[4783]: I1002 11:12:50.726637 4783 generic.go:334] "Generic (PLEG): container finished" podID="018f20a9-52dd-48ff-9a9a-17eb41e690e9" containerID="9c29fed7b93014049297836339c82e8776595e7344232995cf55de7be2cdd265" exitCode=0 Oct 02 11:12:50 crc kubenswrapper[4783]: I1002 11:12:50.726717 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-k7v6f-config-6tlfn" event={"ID":"018f20a9-52dd-48ff-9a9a-17eb41e690e9","Type":"ContainerDied","Data":"9c29fed7b93014049297836339c82e8776595e7344232995cf55de7be2cdd265"} Oct 02 11:12:50 crc kubenswrapper[4783]: I1002 11:12:50.818205 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0" Oct 02 11:12:50 crc kubenswrapper[4783]: I1002 11:12:50.818298 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0" Oct 02 11:12:51 crc kubenswrapper[4783]: I1002 11:12:51.738334 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"58303536-7e70-4db0-a6ac-0bcf69fb7aa6","Type":"ContainerStarted","Data":"d18185145e3886fe7f1ee3ee4f5a02668c0af6e5c219037fa40062f00be12dfc"} Oct 02 11:12:51 crc kubenswrapper[4783]: I1002 11:12:51.739687 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"58303536-7e70-4db0-a6ac-0bcf69fb7aa6","Type":"ContainerStarted","Data":"7a5666d203b120aa51dd9f8339dfa71aaaeaaf86e0642d87120b70480eb047d2"} Oct 02 11:12:51 crc kubenswrapper[4783]: I1002 11:12:51.739797 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0" Oct 02 11:12:51 crc kubenswrapper[4783]: I1002 11:12:51.765185 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" 
podStartSLOduration=2.4626921299999998 podStartE2EDuration="10.765162537s" podCreationTimestamp="2025-10-02 11:12:41 +0000 UTC" firstStartedPulling="2025-10-02 11:12:42.594557329 +0000 UTC m=+1195.910751590" lastFinishedPulling="2025-10-02 11:12:50.897027736 +0000 UTC m=+1204.213221997" observedRunningTime="2025-10-02 11:12:51.763021939 +0000 UTC m=+1205.079216210" watchObservedRunningTime="2025-10-02 11:12:51.765162537 +0000 UTC m=+1205.081356808" Oct 02 11:12:52 crc kubenswrapper[4783]: I1002 11:12:52.424928 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-k7v6f-config-6tlfn" Oct 02 11:12:52 crc kubenswrapper[4783]: I1002 11:12:52.538968 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/018f20a9-52dd-48ff-9a9a-17eb41e690e9-var-run\") pod \"018f20a9-52dd-48ff-9a9a-17eb41e690e9\" (UID: \"018f20a9-52dd-48ff-9a9a-17eb41e690e9\") " Oct 02 11:12:52 crc kubenswrapper[4783]: I1002 11:12:52.539105 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/018f20a9-52dd-48ff-9a9a-17eb41e690e9-var-run" (OuterVolumeSpecName: "var-run") pod "018f20a9-52dd-48ff-9a9a-17eb41e690e9" (UID: "018f20a9-52dd-48ff-9a9a-17eb41e690e9"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 02 11:12:52 crc kubenswrapper[4783]: I1002 11:12:52.539163 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/018f20a9-52dd-48ff-9a9a-17eb41e690e9-scripts\") pod \"018f20a9-52dd-48ff-9a9a-17eb41e690e9\" (UID: \"018f20a9-52dd-48ff-9a9a-17eb41e690e9\") " Oct 02 11:12:52 crc kubenswrapper[4783]: I1002 11:12:52.539308 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/018f20a9-52dd-48ff-9a9a-17eb41e690e9-var-run-ovn\") pod \"018f20a9-52dd-48ff-9a9a-17eb41e690e9\" (UID: \"018f20a9-52dd-48ff-9a9a-17eb41e690e9\") " Oct 02 11:12:52 crc kubenswrapper[4783]: I1002 11:12:52.539360 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qbztr\" (UniqueName: \"kubernetes.io/projected/018f20a9-52dd-48ff-9a9a-17eb41e690e9-kube-api-access-qbztr\") pod \"018f20a9-52dd-48ff-9a9a-17eb41e690e9\" (UID: \"018f20a9-52dd-48ff-9a9a-17eb41e690e9\") " Oct 02 11:12:52 crc kubenswrapper[4783]: I1002 11:12:52.539483 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/018f20a9-52dd-48ff-9a9a-17eb41e690e9-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "018f20a9-52dd-48ff-9a9a-17eb41e690e9" (UID: "018f20a9-52dd-48ff-9a9a-17eb41e690e9"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 02 11:12:52 crc kubenswrapper[4783]: I1002 11:12:52.539967 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/018f20a9-52dd-48ff-9a9a-17eb41e690e9-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "018f20a9-52dd-48ff-9a9a-17eb41e690e9" (UID: "018f20a9-52dd-48ff-9a9a-17eb41e690e9"). InnerVolumeSpecName "additional-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:12:52 crc kubenswrapper[4783]: I1002 11:12:52.540189 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/018f20a9-52dd-48ff-9a9a-17eb41e690e9-additional-scripts\") pod \"018f20a9-52dd-48ff-9a9a-17eb41e690e9\" (UID: \"018f20a9-52dd-48ff-9a9a-17eb41e690e9\") " Oct 02 11:12:52 crc kubenswrapper[4783]: I1002 11:12:52.540287 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/018f20a9-52dd-48ff-9a9a-17eb41e690e9-var-log-ovn\") pod \"018f20a9-52dd-48ff-9a9a-17eb41e690e9\" (UID: \"018f20a9-52dd-48ff-9a9a-17eb41e690e9\") " Oct 02 11:12:52 crc kubenswrapper[4783]: I1002 11:12:52.540369 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/018f20a9-52dd-48ff-9a9a-17eb41e690e9-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "018f20a9-52dd-48ff-9a9a-17eb41e690e9" (UID: "018f20a9-52dd-48ff-9a9a-17eb41e690e9"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 02 11:12:52 crc kubenswrapper[4783]: I1002 11:12:52.540569 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/018f20a9-52dd-48ff-9a9a-17eb41e690e9-scripts" (OuterVolumeSpecName: "scripts") pod "018f20a9-52dd-48ff-9a9a-17eb41e690e9" (UID: "018f20a9-52dd-48ff-9a9a-17eb41e690e9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:12:52 crc kubenswrapper[4783]: I1002 11:12:52.540908 4783 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/018f20a9-52dd-48ff-9a9a-17eb41e690e9-var-log-ovn\") on node \"crc\" DevicePath \"\"" Oct 02 11:12:52 crc kubenswrapper[4783]: I1002 11:12:52.540925 4783 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/018f20a9-52dd-48ff-9a9a-17eb41e690e9-var-run\") on node \"crc\" DevicePath \"\"" Oct 02 11:12:52 crc kubenswrapper[4783]: I1002 11:12:52.540936 4783 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/018f20a9-52dd-48ff-9a9a-17eb41e690e9-scripts\") on node \"crc\" DevicePath \"\"" Oct 02 11:12:52 crc kubenswrapper[4783]: I1002 11:12:52.540947 4783 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/018f20a9-52dd-48ff-9a9a-17eb41e690e9-var-run-ovn\") on node \"crc\" DevicePath \"\"" Oct 02 11:12:52 crc kubenswrapper[4783]: I1002 11:12:52.540980 4783 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/018f20a9-52dd-48ff-9a9a-17eb41e690e9-additional-scripts\") on node \"crc\" DevicePath \"\"" Oct 02 11:12:52 crc kubenswrapper[4783]: I1002 11:12:52.553444 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/018f20a9-52dd-48ff-9a9a-17eb41e690e9-kube-api-access-qbztr" (OuterVolumeSpecName: "kube-api-access-qbztr") pod "018f20a9-52dd-48ff-9a9a-17eb41e690e9" (UID: "018f20a9-52dd-48ff-9a9a-17eb41e690e9"). InnerVolumeSpecName "kube-api-access-qbztr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:12:52 crc kubenswrapper[4783]: I1002 11:12:52.642778 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qbztr\" (UniqueName: \"kubernetes.io/projected/018f20a9-52dd-48ff-9a9a-17eb41e690e9-kube-api-access-qbztr\") on node \"crc\" DevicePath \"\"" Oct 02 11:12:52 crc kubenswrapper[4783]: I1002 11:12:52.748994 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-k7v6f-config-6tlfn" Oct 02 11:12:52 crc kubenswrapper[4783]: I1002 11:12:52.756673 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-k7v6f-config-6tlfn" event={"ID":"018f20a9-52dd-48ff-9a9a-17eb41e690e9","Type":"ContainerDied","Data":"85dc73e2cc8d6e698c91fa23747f73f03547a448d64e5f3f0289c2e7d22a0327"} Oct 02 11:12:52 crc kubenswrapper[4783]: I1002 11:12:52.756748 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="85dc73e2cc8d6e698c91fa23747f73f03547a448d64e5f3f0289c2e7d22a0327" Oct 02 11:12:53 crc kubenswrapper[4783]: I1002 11:12:53.519232 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-k7v6f-config-6tlfn"] Oct 02 11:12:53 crc kubenswrapper[4783]: I1002 11:12:53.531019 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-k7v6f-config-6tlfn"] Oct 02 11:12:53 crc kubenswrapper[4783]: I1002 11:12:53.556488 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="018f20a9-52dd-48ff-9a9a-17eb41e690e9" path="/var/lib/kubelet/pods/018f20a9-52dd-48ff-9a9a-17eb41e690e9/volumes" Oct 02 11:12:53 crc kubenswrapper[4783]: I1002 11:12:53.756849 4783 generic.go:334] "Generic (PLEG): container finished" podID="0072de5e-507d-4efe-8f60-f48b9799fe72" containerID="ba273c9ad3c3fb0568c2b310900c180cc3c347fc6e3b674c54d39be1dfaea57a" exitCode=0 Oct 02 11:12:53 crc kubenswrapper[4783]: I1002 11:12:53.756932 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-4b4vd" event={"ID":"0072de5e-507d-4efe-8f60-f48b9799fe72","Type":"ContainerDied","Data":"ba273c9ad3c3fb0568c2b310900c180cc3c347fc6e3b674c54d39be1dfaea57a"} Oct 02 11:12:54 crc kubenswrapper[4783]: I1002 11:12:54.320521 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0" Oct 02 11:12:54 crc kubenswrapper[4783]: I1002 11:12:54.380179 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/openstack-galera-0" podUID="b969c99b-7cd2-413c-b9ea-4b0fc855fb66" containerName="galera" probeResult="failure" output=< Oct 02 11:12:54 crc kubenswrapper[4783]: wsrep_local_state_comment (Joined) differs from Synced Oct 02 11:12:54 crc kubenswrapper[4783]: > Oct 02 11:12:55 crc kubenswrapper[4783]: I1002 11:12:55.101700 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-ring-rebalance-4b4vd" Oct 02 11:12:55 crc kubenswrapper[4783]: I1002 11:12:55.186222 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0072de5e-507d-4efe-8f60-f48b9799fe72-scripts\") pod \"0072de5e-507d-4efe-8f60-f48b9799fe72\" (UID: \"0072de5e-507d-4efe-8f60-f48b9799fe72\") " Oct 02 11:12:55 crc kubenswrapper[4783]: I1002 11:12:55.186381 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cx55x\" (UniqueName: \"kubernetes.io/projected/0072de5e-507d-4efe-8f60-f48b9799fe72-kube-api-access-cx55x\") pod \"0072de5e-507d-4efe-8f60-f48b9799fe72\" (UID: \"0072de5e-507d-4efe-8f60-f48b9799fe72\") " Oct 02 11:12:55 crc kubenswrapper[4783]: I1002 11:12:55.186999 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0072de5e-507d-4efe-8f60-f48b9799fe72-combined-ca-bundle\") pod \"0072de5e-507d-4efe-8f60-f48b9799fe72\" (UID: \"0072de5e-507d-4efe-8f60-f48b9799fe72\") " Oct 02 11:12:55 crc kubenswrapper[4783]: I1002 11:12:55.187105 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/0072de5e-507d-4efe-8f60-f48b9799fe72-ring-data-devices\") pod \"0072de5e-507d-4efe-8f60-f48b9799fe72\" (UID: \"0072de5e-507d-4efe-8f60-f48b9799fe72\") " Oct 02 11:12:55 crc kubenswrapper[4783]: I1002 11:12:55.187125 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/0072de5e-507d-4efe-8f60-f48b9799fe72-dispersionconf\") pod \"0072de5e-507d-4efe-8f60-f48b9799fe72\" (UID: \"0072de5e-507d-4efe-8f60-f48b9799fe72\") " Oct 02 11:12:55 crc kubenswrapper[4783]: I1002 11:12:55.187145 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/0072de5e-507d-4efe-8f60-f48b9799fe72-swiftconf\") pod \"0072de5e-507d-4efe-8f60-f48b9799fe72\" (UID: \"0072de5e-507d-4efe-8f60-f48b9799fe72\") " Oct 02 11:12:55 crc kubenswrapper[4783]: I1002 11:12:55.187177 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/0072de5e-507d-4efe-8f60-f48b9799fe72-etc-swift\") pod \"0072de5e-507d-4efe-8f60-f48b9799fe72\" (UID: \"0072de5e-507d-4efe-8f60-f48b9799fe72\") " Oct 02 11:12:55 crc kubenswrapper[4783]: I1002 11:12:55.187591 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0072de5e-507d-4efe-8f60-f48b9799fe72-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "0072de5e-507d-4efe-8f60-f48b9799fe72" (UID: "0072de5e-507d-4efe-8f60-f48b9799fe72"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:12:55 crc kubenswrapper[4783]: I1002 11:12:55.188132 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0072de5e-507d-4efe-8f60-f48b9799fe72-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "0072de5e-507d-4efe-8f60-f48b9799fe72" (UID: "0072de5e-507d-4efe-8f60-f48b9799fe72"). InnerVolumeSpecName "etc-swift". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:12:55 crc kubenswrapper[4783]: I1002 11:12:55.193703 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0072de5e-507d-4efe-8f60-f48b9799fe72-kube-api-access-cx55x" (OuterVolumeSpecName: "kube-api-access-cx55x") pod "0072de5e-507d-4efe-8f60-f48b9799fe72" (UID: "0072de5e-507d-4efe-8f60-f48b9799fe72"). InnerVolumeSpecName "kube-api-access-cx55x". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:12:55 crc kubenswrapper[4783]: I1002 11:12:55.194000 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0072de5e-507d-4efe-8f60-f48b9799fe72-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "0072de5e-507d-4efe-8f60-f48b9799fe72" (UID: "0072de5e-507d-4efe-8f60-f48b9799fe72"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:12:55 crc kubenswrapper[4783]: I1002 11:12:55.207431 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0072de5e-507d-4efe-8f60-f48b9799fe72-scripts" (OuterVolumeSpecName: "scripts") pod "0072de5e-507d-4efe-8f60-f48b9799fe72" (UID: "0072de5e-507d-4efe-8f60-f48b9799fe72"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:12:55 crc kubenswrapper[4783]: I1002 11:12:55.216593 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0072de5e-507d-4efe-8f60-f48b9799fe72-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0072de5e-507d-4efe-8f60-f48b9799fe72" (UID: "0072de5e-507d-4efe-8f60-f48b9799fe72"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:12:55 crc kubenswrapper[4783]: I1002 11:12:55.219160 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0072de5e-507d-4efe-8f60-f48b9799fe72-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "0072de5e-507d-4efe-8f60-f48b9799fe72" (UID: "0072de5e-507d-4efe-8f60-f48b9799fe72"). InnerVolumeSpecName "swiftconf". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:12:55 crc kubenswrapper[4783]: I1002 11:12:55.288694 4783 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/0072de5e-507d-4efe-8f60-f48b9799fe72-ring-data-devices\") on node \"crc\" DevicePath \"\"" Oct 02 11:12:55 crc kubenswrapper[4783]: I1002 11:12:55.288725 4783 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/0072de5e-507d-4efe-8f60-f48b9799fe72-dispersionconf\") on node \"crc\" DevicePath \"\"" Oct 02 11:12:55 crc kubenswrapper[4783]: I1002 11:12:55.288733 4783 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/0072de5e-507d-4efe-8f60-f48b9799fe72-swiftconf\") on node \"crc\" DevicePath \"\"" Oct 02 11:12:55 crc kubenswrapper[4783]: I1002 11:12:55.288743 4783 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/0072de5e-507d-4efe-8f60-f48b9799fe72-etc-swift\") on node \"crc\" DevicePath \"\"" Oct 02 11:12:55 crc kubenswrapper[4783]: I1002 11:12:55.288753 4783 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/0072de5e-507d-4efe-8f60-f48b9799fe72-scripts\") on node \"crc\" DevicePath \"\"" Oct 02 11:12:55 crc kubenswrapper[4783]: I1002 11:12:55.288761 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cx55x\" (UniqueName: \"kubernetes.io/projected/0072de5e-507d-4efe-8f60-f48b9799fe72-kube-api-access-cx55x\") on node \"crc\" DevicePath \"\"" Oct 02 11:12:55 crc kubenswrapper[4783]: I1002 11:12:55.288771 4783 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0072de5e-507d-4efe-8f60-f48b9799fe72-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 11:12:55 crc kubenswrapper[4783]: I1002 11:12:55.773657 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-4b4vd" event={"ID":"0072de5e-507d-4efe-8f60-f48b9799fe72","Type":"ContainerDied","Data":"217da11ea8acdc1a7d4c70ce3549891ab00501c58c94104a18f4215dcef7695a"} Oct 02 11:12:55 crc kubenswrapper[4783]: I1002 11:12:55.773728 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-4b4vd" Oct 02 11:12:55 crc kubenswrapper[4783]: I1002 11:12:55.773742 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="217da11ea8acdc1a7d4c70ce3549891ab00501c58c94104a18f4215dcef7695a" Oct 02 11:12:55 crc kubenswrapper[4783]: I1002 11:12:55.796473 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/5c831884-5efd-4048-b7df-d0edf1d51e89-etc-swift\") pod \"swift-storage-0\" (UID: \"5c831884-5efd-4048-b7df-d0edf1d51e89\") " pod="openstack/swift-storage-0" Oct 02 11:12:55 crc kubenswrapper[4783]: I1002 11:12:55.804260 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/5c831884-5efd-4048-b7df-d0edf1d51e89-etc-swift\") pod \"swift-storage-0\" (UID: \"5c831884-5efd-4048-b7df-d0edf1d51e89\") " pod="openstack/swift-storage-0" Oct 02 11:12:56 crc kubenswrapper[4783]: I1002 11:12:56.019516 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Oct 02 11:12:56 crc kubenswrapper[4783]: I1002 11:12:56.607251 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Oct 02 11:12:56 crc kubenswrapper[4783]: W1002 11:12:56.614774 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod5c831884_5efd_4048_b7df_d0edf1d51e89.slice/crio-e8259311ea9f107eeb1a49593969a626cbd88f6370cdeb5e5d8366d5bc0847e0 WatchSource:0}: Error finding container e8259311ea9f107eeb1a49593969a626cbd88f6370cdeb5e5d8366d5bc0847e0: Status 404 returned error can't find the container with id e8259311ea9f107eeb1a49593969a626cbd88f6370cdeb5e5d8366d5bc0847e0 Oct 02 11:12:56 crc kubenswrapper[4783]: I1002 11:12:56.781514 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"5c831884-5efd-4048-b7df-d0edf1d51e89","Type":"ContainerStarted","Data":"e8259311ea9f107eeb1a49593969a626cbd88f6370cdeb5e5d8366d5bc0847e0"} Oct 02 11:12:56 crc kubenswrapper[4783]: I1002 11:12:56.984686 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Oct 02 11:12:57 crc kubenswrapper[4783]: I1002 11:12:57.238394 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Oct 02 11:12:57 crc kubenswrapper[4783]: I1002 11:12:57.778654 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:12:58 crc kubenswrapper[4783]: I1002 11:12:58.335609 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Oct 02 11:12:59 crc kubenswrapper[4783]: I1002 11:12:59.788733 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0" Oct 02 11:12:59 crc kubenswrapper[4783]: I1002 11:12:59.806897 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"5c831884-5efd-4048-b7df-d0edf1d51e89","Type":"ContainerStarted","Data":"403e1ab6e50945ab55f109ae00e1ffd3d0824b300cb08b8b21a2dee00de395db"} Oct 02 11:12:59 crc kubenswrapper[4783]: I1002 11:12:59.806944 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"5c831884-5efd-4048-b7df-d0edf1d51e89","Type":"ContainerStarted","Data":"b9bdf6219f6d83d182fcc8b701081877feb5a7442abdd07957cea3b5927cd9d4"} Oct 02 11:12:59 crc kubenswrapper[4783]: I1002 11:12:59.806958 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"5c831884-5efd-4048-b7df-d0edf1d51e89","Type":"ContainerStarted","Data":"8ed1868083c9b63d76cfa7ab893a758e2960f1b1a45ea3f436636a9a8039689f"} Oct 02 11:13:00 crc kubenswrapper[4783]: I1002 11:13:00.683094 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-wqbkd"] Oct 02 11:13:00 crc kubenswrapper[4783]: E1002 11:13:00.683502 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="018f20a9-52dd-48ff-9a9a-17eb41e690e9" containerName="ovn-config" Oct 02 11:13:00 crc kubenswrapper[4783]: I1002 11:13:00.683524 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="018f20a9-52dd-48ff-9a9a-17eb41e690e9" containerName="ovn-config" Oct 02 11:13:00 crc kubenswrapper[4783]: E1002 11:13:00.683565 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0072de5e-507d-4efe-8f60-f48b9799fe72" 
containerName="swift-ring-rebalance" Oct 02 11:13:00 crc kubenswrapper[4783]: I1002 11:13:00.683574 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="0072de5e-507d-4efe-8f60-f48b9799fe72" containerName="swift-ring-rebalance" Oct 02 11:13:00 crc kubenswrapper[4783]: I1002 11:13:00.683806 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="018f20a9-52dd-48ff-9a9a-17eb41e690e9" containerName="ovn-config" Oct 02 11:13:00 crc kubenswrapper[4783]: I1002 11:13:00.683846 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="0072de5e-507d-4efe-8f60-f48b9799fe72" containerName="swift-ring-rebalance" Oct 02 11:13:00 crc kubenswrapper[4783]: I1002 11:13:00.684489 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-wqbkd" Oct 02 11:13:00 crc kubenswrapper[4783]: I1002 11:13:00.700468 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-wqbkd"] Oct 02 11:13:00 crc kubenswrapper[4783]: I1002 11:13:00.794160 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8qg4q\" (UniqueName: \"kubernetes.io/projected/ad34849d-3626-40fd-9ed2-a0c3064d12d0-kube-api-access-8qg4q\") pod \"keystone-db-create-wqbkd\" (UID: \"ad34849d-3626-40fd-9ed2-a0c3064d12d0\") " pod="openstack/keystone-db-create-wqbkd" Oct 02 11:13:00 crc kubenswrapper[4783]: I1002 11:13:00.817548 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"5c831884-5efd-4048-b7df-d0edf1d51e89","Type":"ContainerStarted","Data":"26bd249bbe0f5b81df882ef35ea3b1a5a463564a245e2889692f58bb82228e64"} Oct 02 11:13:00 crc kubenswrapper[4783]: I1002 11:13:00.879696 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-ql48j"] Oct 02 11:13:00 crc kubenswrapper[4783]: I1002 11:13:00.880673 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-ql48j" Oct 02 11:13:00 crc kubenswrapper[4783]: I1002 11:13:00.896103 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8qg4q\" (UniqueName: \"kubernetes.io/projected/ad34849d-3626-40fd-9ed2-a0c3064d12d0-kube-api-access-8qg4q\") pod \"keystone-db-create-wqbkd\" (UID: \"ad34849d-3626-40fd-9ed2-a0c3064d12d0\") " pod="openstack/keystone-db-create-wqbkd" Oct 02 11:13:00 crc kubenswrapper[4783]: I1002 11:13:00.901494 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-ql48j"] Oct 02 11:13:00 crc kubenswrapper[4783]: I1002 11:13:00.949146 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8qg4q\" (UniqueName: \"kubernetes.io/projected/ad34849d-3626-40fd-9ed2-a0c3064d12d0-kube-api-access-8qg4q\") pod \"keystone-db-create-wqbkd\" (UID: \"ad34849d-3626-40fd-9ed2-a0c3064d12d0\") " pod="openstack/keystone-db-create-wqbkd" Oct 02 11:13:00 crc kubenswrapper[4783]: I1002 11:13:00.998243 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tvzw6\" (UniqueName: \"kubernetes.io/projected/b0930159-862f-47a8-9516-5b9aaf532653-kube-api-access-tvzw6\") pod \"placement-db-create-ql48j\" (UID: \"b0930159-862f-47a8-9516-5b9aaf532653\") " pod="openstack/placement-db-create-ql48j" Oct 02 11:13:01 crc kubenswrapper[4783]: I1002 11:13:01.002541 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-create-wqbkd" Oct 02 11:13:01 crc kubenswrapper[4783]: I1002 11:13:01.123170 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tvzw6\" (UniqueName: \"kubernetes.io/projected/b0930159-862f-47a8-9516-5b9aaf532653-kube-api-access-tvzw6\") pod \"placement-db-create-ql48j\" (UID: \"b0930159-862f-47a8-9516-5b9aaf532653\") " pod="openstack/placement-db-create-ql48j" Oct 02 11:13:01 crc kubenswrapper[4783]: I1002 11:13:01.154350 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tvzw6\" (UniqueName: \"kubernetes.io/projected/b0930159-862f-47a8-9516-5b9aaf532653-kube-api-access-tvzw6\") pod \"placement-db-create-ql48j\" (UID: \"b0930159-862f-47a8-9516-5b9aaf532653\") " pod="openstack/placement-db-create-ql48j" Oct 02 11:13:01 crc kubenswrapper[4783]: I1002 11:13:01.196369 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-ql48j" Oct 02 11:13:01 crc kubenswrapper[4783]: I1002 11:13:01.477942 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-wqbkd"] Oct 02 11:13:02 crc kubenswrapper[4783]: I1002 11:13:02.182970 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0" Oct 02 11:13:02 crc kubenswrapper[4783]: I1002 11:13:02.842353 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-wqbkd" event={"ID":"ad34849d-3626-40fd-9ed2-a0c3064d12d0","Type":"ContainerStarted","Data":"999fd84a7365bcf4aed881f8104b78d2bd82eb72ebfc16617d9c46f673169487"} Oct 02 11:13:03 crc kubenswrapper[4783]: I1002 11:13:03.110955 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-ql48j"] Oct 02 11:13:03 crc kubenswrapper[4783]: I1002 11:13:03.853464 4783 generic.go:334] "Generic (PLEG): container finished" podID="ad34849d-3626-40fd-9ed2-a0c3064d12d0" containerID="a23a88861e89dccc770c76fd4dba478d3df409ad874799c840ca37eeae4e93bf" exitCode=0 Oct 02 11:13:03 crc kubenswrapper[4783]: I1002 11:13:03.853538 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-wqbkd" event={"ID":"ad34849d-3626-40fd-9ed2-a0c3064d12d0","Type":"ContainerDied","Data":"a23a88861e89dccc770c76fd4dba478d3df409ad874799c840ca37eeae4e93bf"} Oct 02 11:13:03 crc kubenswrapper[4783]: W1002 11:13:03.885807 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb0930159_862f_47a8_9516_5b9aaf532653.slice/crio-da5c8127752e27f486b5f4397079b660d51c947e500d3506d2a3cc82b8ee435a WatchSource:0}: Error finding container da5c8127752e27f486b5f4397079b660d51c947e500d3506d2a3cc82b8ee435a: Status 404 returned error can't find the container with id da5c8127752e27f486b5f4397079b660d51c947e500d3506d2a3cc82b8ee435a Oct 02 11:13:04 crc kubenswrapper[4783]: I1002 11:13:04.864302 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"5c831884-5efd-4048-b7df-d0edf1d51e89","Type":"ContainerStarted","Data":"4ea494052e096eac9d6175e84ef58b69f6f0b248c968946074788fa5ea469445"} Oct 02 11:13:04 crc kubenswrapper[4783]: I1002 11:13:04.868541 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-ql48j" event={"ID":"b0930159-862f-47a8-9516-5b9aaf532653","Type":"ContainerStarted","Data":"3743878d85cf7c490d69a9d93f05870314d6f007e054f6c244319497089995db"} Oct 
02 11:13:04 crc kubenswrapper[4783]: I1002 11:13:04.868596 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-ql48j" event={"ID":"b0930159-862f-47a8-9516-5b9aaf532653","Type":"ContainerStarted","Data":"da5c8127752e27f486b5f4397079b660d51c947e500d3506d2a3cc82b8ee435a"} Oct 02 11:13:04 crc kubenswrapper[4783]: I1002 11:13:04.889338 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-create-ql48j" podStartSLOduration=4.889321968 podStartE2EDuration="4.889321968s" podCreationTimestamp="2025-10-02 11:13:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:13:04.88829102 +0000 UTC m=+1218.204485291" watchObservedRunningTime="2025-10-02 11:13:04.889321968 +0000 UTC m=+1218.205516229" Oct 02 11:13:05 crc kubenswrapper[4783]: I1002 11:13:05.464196 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-wqbkd" Oct 02 11:13:05 crc kubenswrapper[4783]: I1002 11:13:05.592188 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8qg4q\" (UniqueName: \"kubernetes.io/projected/ad34849d-3626-40fd-9ed2-a0c3064d12d0-kube-api-access-8qg4q\") pod \"ad34849d-3626-40fd-9ed2-a0c3064d12d0\" (UID: \"ad34849d-3626-40fd-9ed2-a0c3064d12d0\") " Oct 02 11:13:05 crc kubenswrapper[4783]: I1002 11:13:05.597337 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ad34849d-3626-40fd-9ed2-a0c3064d12d0-kube-api-access-8qg4q" (OuterVolumeSpecName: "kube-api-access-8qg4q") pod "ad34849d-3626-40fd-9ed2-a0c3064d12d0" (UID: "ad34849d-3626-40fd-9ed2-a0c3064d12d0"). InnerVolumeSpecName "kube-api-access-8qg4q". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:13:05 crc kubenswrapper[4783]: I1002 11:13:05.694283 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8qg4q\" (UniqueName: \"kubernetes.io/projected/ad34849d-3626-40fd-9ed2-a0c3064d12d0-kube-api-access-8qg4q\") on node \"crc\" DevicePath \"\"" Oct 02 11:13:05 crc kubenswrapper[4783]: I1002 11:13:05.878984 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"5c831884-5efd-4048-b7df-d0edf1d51e89","Type":"ContainerStarted","Data":"29e912d429f92f7dd185d4c3bfc28c9dbc630e9b66ec560fef5aff862cb95edc"} Oct 02 11:13:05 crc kubenswrapper[4783]: I1002 11:13:05.879058 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"5c831884-5efd-4048-b7df-d0edf1d51e89","Type":"ContainerStarted","Data":"4711d8f7d9e7dd605d44a1165d9841806b4f02156d643b774efd9c070623f515"} Oct 02 11:13:05 crc kubenswrapper[4783]: I1002 11:13:05.880703 4783 generic.go:334] "Generic (PLEG): container finished" podID="b0930159-862f-47a8-9516-5b9aaf532653" containerID="3743878d85cf7c490d69a9d93f05870314d6f007e054f6c244319497089995db" exitCode=0 Oct 02 11:13:05 crc kubenswrapper[4783]: I1002 11:13:05.880795 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-ql48j" event={"ID":"b0930159-862f-47a8-9516-5b9aaf532653","Type":"ContainerDied","Data":"3743878d85cf7c490d69a9d93f05870314d6f007e054f6c244319497089995db"} Oct 02 11:13:05 crc kubenswrapper[4783]: I1002 11:13:05.882718 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-wqbkd" event={"ID":"ad34849d-3626-40fd-9ed2-a0c3064d12d0","Type":"ContainerDied","Data":"999fd84a7365bcf4aed881f8104b78d2bd82eb72ebfc16617d9c46f673169487"} Oct 02 11:13:05 crc kubenswrapper[4783]: I1002 11:13:05.882740 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="999fd84a7365bcf4aed881f8104b78d2bd82eb72ebfc16617d9c46f673169487" Oct 02 11:13:05 crc kubenswrapper[4783]: I1002 11:13:05.882766 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-wqbkd" Oct 02 11:13:06 crc kubenswrapper[4783]: I1002 11:13:06.291508 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-d6dk9"] Oct 02 11:13:06 crc kubenswrapper[4783]: E1002 11:13:06.291966 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad34849d-3626-40fd-9ed2-a0c3064d12d0" containerName="mariadb-database-create" Oct 02 11:13:06 crc kubenswrapper[4783]: I1002 11:13:06.291995 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad34849d-3626-40fd-9ed2-a0c3064d12d0" containerName="mariadb-database-create" Oct 02 11:13:06 crc kubenswrapper[4783]: I1002 11:13:06.292239 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad34849d-3626-40fd-9ed2-a0c3064d12d0" containerName="mariadb-database-create" Oct 02 11:13:06 crc kubenswrapper[4783]: I1002 11:13:06.292926 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-d6dk9" Oct 02 11:13:06 crc kubenswrapper[4783]: I1002 11:13:06.302401 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-d6dk9"] Oct 02 11:13:06 crc kubenswrapper[4783]: I1002 11:13:06.404004 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-65b9c\" (UniqueName: \"kubernetes.io/projected/a4e12345-bf38-4e17-bcc3-4e507b73e9b8-kube-api-access-65b9c\") pod \"glance-db-create-d6dk9\" (UID: \"a4e12345-bf38-4e17-bcc3-4e507b73e9b8\") " pod="openstack/glance-db-create-d6dk9" Oct 02 11:13:06 crc kubenswrapper[4783]: I1002 11:13:06.506159 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-65b9c\" (UniqueName: \"kubernetes.io/projected/a4e12345-bf38-4e17-bcc3-4e507b73e9b8-kube-api-access-65b9c\") pod \"glance-db-create-d6dk9\" (UID: \"a4e12345-bf38-4e17-bcc3-4e507b73e9b8\") " pod="openstack/glance-db-create-d6dk9" Oct 02 11:13:06 crc kubenswrapper[4783]: I1002 11:13:06.529631 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-65b9c\" (UniqueName: \"kubernetes.io/projected/a4e12345-bf38-4e17-bcc3-4e507b73e9b8-kube-api-access-65b9c\") pod \"glance-db-create-d6dk9\" (UID: \"a4e12345-bf38-4e17-bcc3-4e507b73e9b8\") " pod="openstack/glance-db-create-d6dk9" Oct 02 11:13:06 crc kubenswrapper[4783]: I1002 11:13:06.610970 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-d6dk9" Oct 02 11:13:06 crc kubenswrapper[4783]: I1002 11:13:06.898204 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"5c831884-5efd-4048-b7df-d0edf1d51e89","Type":"ContainerStarted","Data":"7d21624fa600007d6cc18759e99e7836796b82c95efbe3979fa9296879b8c414"} Oct 02 11:13:07 crc kubenswrapper[4783]: I1002 11:13:07.076857 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-d6dk9"] Oct 02 11:13:07 crc kubenswrapper[4783]: W1002 11:13:07.084879 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda4e12345_bf38_4e17_bcc3_4e507b73e9b8.slice/crio-febb8b84841752d06db204ffeb8c4997cfe8c5a58801839d2c81d79bf832d42b WatchSource:0}: Error finding container febb8b84841752d06db204ffeb8c4997cfe8c5a58801839d2c81d79bf832d42b: Status 404 returned error can't find the container with id febb8b84841752d06db204ffeb8c4997cfe8c5a58801839d2c81d79bf832d42b Oct 02 11:13:07 crc kubenswrapper[4783]: I1002 11:13:07.245278 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-ql48j" Oct 02 11:13:07 crc kubenswrapper[4783]: I1002 11:13:07.421851 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tvzw6\" (UniqueName: \"kubernetes.io/projected/b0930159-862f-47a8-9516-5b9aaf532653-kube-api-access-tvzw6\") pod \"b0930159-862f-47a8-9516-5b9aaf532653\" (UID: \"b0930159-862f-47a8-9516-5b9aaf532653\") " Oct 02 11:13:07 crc kubenswrapper[4783]: I1002 11:13:07.426725 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b0930159-862f-47a8-9516-5b9aaf532653-kube-api-access-tvzw6" (OuterVolumeSpecName: "kube-api-access-tvzw6") pod "b0930159-862f-47a8-9516-5b9aaf532653" (UID: "b0930159-862f-47a8-9516-5b9aaf532653"). InnerVolumeSpecName "kube-api-access-tvzw6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:13:07 crc kubenswrapper[4783]: I1002 11:13:07.523659 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tvzw6\" (UniqueName: \"kubernetes.io/projected/b0930159-862f-47a8-9516-5b9aaf532653-kube-api-access-tvzw6\") on node \"crc\" DevicePath \"\"" Oct 02 11:13:07 crc kubenswrapper[4783]: I1002 11:13:07.907199 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-ql48j" event={"ID":"b0930159-862f-47a8-9516-5b9aaf532653","Type":"ContainerDied","Data":"da5c8127752e27f486b5f4397079b660d51c947e500d3506d2a3cc82b8ee435a"} Oct 02 11:13:07 crc kubenswrapper[4783]: I1002 11:13:07.907234 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-ql48j" Oct 02 11:13:07 crc kubenswrapper[4783]: I1002 11:13:07.907257 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="da5c8127752e27f486b5f4397079b660d51c947e500d3506d2a3cc82b8ee435a" Oct 02 11:13:07 crc kubenswrapper[4783]: I1002 11:13:07.913923 4783 generic.go:334] "Generic (PLEG): container finished" podID="a4e12345-bf38-4e17-bcc3-4e507b73e9b8" containerID="f02fcc448c68e9a5c7c8e5fbc7ee7d25dd17bb540bc05d430e935ca6c555308a" exitCode=0 Oct 02 11:13:07 crc kubenswrapper[4783]: I1002 11:13:07.913957 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-d6dk9" event={"ID":"a4e12345-bf38-4e17-bcc3-4e507b73e9b8","Type":"ContainerDied","Data":"f02fcc448c68e9a5c7c8e5fbc7ee7d25dd17bb540bc05d430e935ca6c555308a"} Oct 02 11:13:07 crc kubenswrapper[4783]: I1002 11:13:07.913981 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-d6dk9" event={"ID":"a4e12345-bf38-4e17-bcc3-4e507b73e9b8","Type":"ContainerStarted","Data":"febb8b84841752d06db204ffeb8c4997cfe8c5a58801839d2c81d79bf832d42b"} Oct 02 11:13:08 crc kubenswrapper[4783]: I1002 11:13:08.936266 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"5c831884-5efd-4048-b7df-d0edf1d51e89","Type":"ContainerStarted","Data":"b28b73edb6f959b18082331b390a23c67493f29d48f6fa97f78d656a07bcc535"} Oct 02 11:13:08 crc kubenswrapper[4783]: I1002 11:13:08.936618 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"5c831884-5efd-4048-b7df-d0edf1d51e89","Type":"ContainerStarted","Data":"84e2605f6ddc57a2a760107e40e93f20f6718b2816cc33a8f2c48aabdf8c0806"} Oct 02 11:13:09 crc kubenswrapper[4783]: I1002 11:13:09.364748 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-d6dk9" Oct 02 11:13:09 crc kubenswrapper[4783]: I1002 11:13:09.467461 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-65b9c\" (UniqueName: \"kubernetes.io/projected/a4e12345-bf38-4e17-bcc3-4e507b73e9b8-kube-api-access-65b9c\") pod \"a4e12345-bf38-4e17-bcc3-4e507b73e9b8\" (UID: \"a4e12345-bf38-4e17-bcc3-4e507b73e9b8\") " Oct 02 11:13:09 crc kubenswrapper[4783]: I1002 11:13:09.506761 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a4e12345-bf38-4e17-bcc3-4e507b73e9b8-kube-api-access-65b9c" (OuterVolumeSpecName: "kube-api-access-65b9c") pod "a4e12345-bf38-4e17-bcc3-4e507b73e9b8" (UID: "a4e12345-bf38-4e17-bcc3-4e507b73e9b8"). InnerVolumeSpecName "kube-api-access-65b9c". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:13:09 crc kubenswrapper[4783]: I1002 11:13:09.568862 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-65b9c\" (UniqueName: \"kubernetes.io/projected/a4e12345-bf38-4e17-bcc3-4e507b73e9b8-kube-api-access-65b9c\") on node \"crc\" DevicePath \"\"" Oct 02 11:13:09 crc kubenswrapper[4783]: I1002 11:13:09.889562 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-2btm9"] Oct 02 11:13:09 crc kubenswrapper[4783]: E1002 11:13:09.889976 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b0930159-862f-47a8-9516-5b9aaf532653" containerName="mariadb-database-create" Oct 02 11:13:09 crc kubenswrapper[4783]: I1002 11:13:09.889999 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="b0930159-862f-47a8-9516-5b9aaf532653" containerName="mariadb-database-create" Oct 02 11:13:09 crc kubenswrapper[4783]: E1002 11:13:09.890034 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a4e12345-bf38-4e17-bcc3-4e507b73e9b8" containerName="mariadb-database-create" Oct 02 11:13:09 crc kubenswrapper[4783]: I1002 11:13:09.890043 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="a4e12345-bf38-4e17-bcc3-4e507b73e9b8" containerName="mariadb-database-create" Oct 02 11:13:09 crc kubenswrapper[4783]: I1002 11:13:09.890245 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="a4e12345-bf38-4e17-bcc3-4e507b73e9b8" containerName="mariadb-database-create" Oct 02 11:13:09 crc kubenswrapper[4783]: I1002 11:13:09.890269 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="b0930159-862f-47a8-9516-5b9aaf532653" containerName="mariadb-database-create" Oct 02 11:13:09 crc kubenswrapper[4783]: I1002 11:13:09.890901 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-2btm9" Oct 02 11:13:09 crc kubenswrapper[4783]: I1002 11:13:09.899676 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-2btm9"] Oct 02 11:13:09 crc kubenswrapper[4783]: I1002 11:13:09.944270 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-d6dk9" event={"ID":"a4e12345-bf38-4e17-bcc3-4e507b73e9b8","Type":"ContainerDied","Data":"febb8b84841752d06db204ffeb8c4997cfe8c5a58801839d2c81d79bf832d42b"} Oct 02 11:13:09 crc kubenswrapper[4783]: I1002 11:13:09.944322 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="febb8b84841752d06db204ffeb8c4997cfe8c5a58801839d2c81d79bf832d42b" Oct 02 11:13:09 crc kubenswrapper[4783]: I1002 11:13:09.944281 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-create-d6dk9" Oct 02 11:13:09 crc kubenswrapper[4783]: I1002 11:13:09.951652 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"5c831884-5efd-4048-b7df-d0edf1d51e89","Type":"ContainerStarted","Data":"91269237bfe5774f038db458382d9760f008bf690943ede977bda98be5cd9942"} Oct 02 11:13:09 crc kubenswrapper[4783]: I1002 11:13:09.951703 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"5c831884-5efd-4048-b7df-d0edf1d51e89","Type":"ContainerStarted","Data":"92ee8fc4da40abae25fc0c76cbe0313aaa4cad6ad8490d30a744485a40a0ecf0"} Oct 02 11:13:09 crc kubenswrapper[4783]: I1002 11:13:09.951719 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"5c831884-5efd-4048-b7df-d0edf1d51e89","Type":"ContainerStarted","Data":"7bf8a96af7a3cbce6365b62454985c619537c654e13639050c82a071d348a0e3"} Oct 02 11:13:09 crc kubenswrapper[4783]: I1002 11:13:09.951733 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"5c831884-5efd-4048-b7df-d0edf1d51e89","Type":"ContainerStarted","Data":"e773fa7a39de79d694c66e3f17cef07711e333c31ffe7b438ea93459a8c4c5cf"} Oct 02 11:13:09 crc kubenswrapper[4783]: I1002 11:13:09.975982 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wfmsg\" (UniqueName: \"kubernetes.io/projected/17badb90-21e0-40b3-b28f-15424c38ca7a-kube-api-access-wfmsg\") pod \"cinder-db-create-2btm9\" (UID: \"17badb90-21e0-40b3-b28f-15424c38ca7a\") " pod="openstack/cinder-db-create-2btm9" Oct 02 11:13:09 crc kubenswrapper[4783]: I1002 11:13:09.983459 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-vndjn"] Oct 02 11:13:09 crc kubenswrapper[4783]: I1002 11:13:09.984700 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-vndjn" Oct 02 11:13:10 crc kubenswrapper[4783]: I1002 11:13:10.001424 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-vndjn"] Oct 02 11:13:10 crc kubenswrapper[4783]: I1002 11:13:10.080713 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wfmsg\" (UniqueName: \"kubernetes.io/projected/17badb90-21e0-40b3-b28f-15424c38ca7a-kube-api-access-wfmsg\") pod \"cinder-db-create-2btm9\" (UID: \"17badb90-21e0-40b3-b28f-15424c38ca7a\") " pod="openstack/cinder-db-create-2btm9" Oct 02 11:13:10 crc kubenswrapper[4783]: I1002 11:13:10.081032 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hcxzz\" (UniqueName: \"kubernetes.io/projected/cd068d52-b7f9-4d38-80bf-941cac6d3b39-kube-api-access-hcxzz\") pod \"barbican-db-create-vndjn\" (UID: \"cd068d52-b7f9-4d38-80bf-941cac6d3b39\") " pod="openstack/barbican-db-create-vndjn" Oct 02 11:13:10 crc kubenswrapper[4783]: I1002 11:13:10.086405 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-pqd5f"] Oct 02 11:13:10 crc kubenswrapper[4783]: I1002 11:13:10.087630 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-pqd5f" Oct 02 11:13:10 crc kubenswrapper[4783]: I1002 11:13:10.104639 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wfmsg\" (UniqueName: \"kubernetes.io/projected/17badb90-21e0-40b3-b28f-15424c38ca7a-kube-api-access-wfmsg\") pod \"cinder-db-create-2btm9\" (UID: \"17badb90-21e0-40b3-b28f-15424c38ca7a\") " pod="openstack/cinder-db-create-2btm9" Oct 02 11:13:10 crc kubenswrapper[4783]: I1002 11:13:10.104842 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-pqd5f"] Oct 02 11:13:10 crc kubenswrapper[4783]: I1002 11:13:10.182109 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9snk8\" (UniqueName: \"kubernetes.io/projected/546476d1-d7f2-42d1-8cf0-dff8dbaa5f10-kube-api-access-9snk8\") pod \"neutron-db-create-pqd5f\" (UID: \"546476d1-d7f2-42d1-8cf0-dff8dbaa5f10\") " pod="openstack/neutron-db-create-pqd5f" Oct 02 11:13:10 crc kubenswrapper[4783]: I1002 11:13:10.182549 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hcxzz\" (UniqueName: \"kubernetes.io/projected/cd068d52-b7f9-4d38-80bf-941cac6d3b39-kube-api-access-hcxzz\") pod \"barbican-db-create-vndjn\" (UID: \"cd068d52-b7f9-4d38-80bf-941cac6d3b39\") " pod="openstack/barbican-db-create-vndjn" Oct 02 11:13:10 crc kubenswrapper[4783]: I1002 11:13:10.197355 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hcxzz\" (UniqueName: \"kubernetes.io/projected/cd068d52-b7f9-4d38-80bf-941cac6d3b39-kube-api-access-hcxzz\") pod \"barbican-db-create-vndjn\" (UID: \"cd068d52-b7f9-4d38-80bf-941cac6d3b39\") " pod="openstack/barbican-db-create-vndjn" Oct 02 11:13:10 crc kubenswrapper[4783]: I1002 11:13:10.214049 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-2btm9" Oct 02 11:13:10 crc kubenswrapper[4783]: I1002 11:13:10.284459 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9snk8\" (UniqueName: \"kubernetes.io/projected/546476d1-d7f2-42d1-8cf0-dff8dbaa5f10-kube-api-access-9snk8\") pod \"neutron-db-create-pqd5f\" (UID: \"546476d1-d7f2-42d1-8cf0-dff8dbaa5f10\") " pod="openstack/neutron-db-create-pqd5f" Oct 02 11:13:10 crc kubenswrapper[4783]: I1002 11:13:10.309183 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9snk8\" (UniqueName: \"kubernetes.io/projected/546476d1-d7f2-42d1-8cf0-dff8dbaa5f10-kube-api-access-9snk8\") pod \"neutron-db-create-pqd5f\" (UID: \"546476d1-d7f2-42d1-8cf0-dff8dbaa5f10\") " pod="openstack/neutron-db-create-pqd5f" Oct 02 11:13:10 crc kubenswrapper[4783]: I1002 11:13:10.367782 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-vndjn" Oct 02 11:13:10 crc kubenswrapper[4783]: I1002 11:13:10.419392 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-create-pqd5f" Oct 02 11:13:10 crc kubenswrapper[4783]: I1002 11:13:10.618958 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-2btm9"] Oct 02 11:13:10 crc kubenswrapper[4783]: W1002 11:13:10.627200 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod17badb90_21e0_40b3_b28f_15424c38ca7a.slice/crio-1ad86d45396a35e30af877f40dae8713fd63d9f51a8493343ef358558b17a321 WatchSource:0}: Error finding container 1ad86d45396a35e30af877f40dae8713fd63d9f51a8493343ef358558b17a321: Status 404 returned error can't find the container with id 1ad86d45396a35e30af877f40dae8713fd63d9f51a8493343ef358558b17a321 Oct 02 11:13:10 crc kubenswrapper[4783]: I1002 11:13:10.839421 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-vndjn"] Oct 02 11:13:10 crc kubenswrapper[4783]: I1002 11:13:10.944537 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-pqd5f"] Oct 02 11:13:10 crc kubenswrapper[4783]: W1002 11:13:10.962610 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod546476d1_d7f2_42d1_8cf0_dff8dbaa5f10.slice/crio-a4f39a433384cabc9bd14d5464320259cee5ed3a469d70c209408d037abf4683 WatchSource:0}: Error finding container a4f39a433384cabc9bd14d5464320259cee5ed3a469d70c209408d037abf4683: Status 404 returned error can't find the container with id a4f39a433384cabc9bd14d5464320259cee5ed3a469d70c209408d037abf4683 Oct 02 11:13:10 crc kubenswrapper[4783]: I1002 11:13:10.974544 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"5c831884-5efd-4048-b7df-d0edf1d51e89","Type":"ContainerStarted","Data":"e25b7a2793a6d5fc394cf29ed4028d17ff1146ec295d321f8ed8cdf44d4cc616"} Oct 02 11:13:10 crc kubenswrapper[4783]: I1002 11:13:10.976874 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-2btm9" event={"ID":"17badb90-21e0-40b3-b28f-15424c38ca7a","Type":"ContainerStarted","Data":"1ad86d45396a35e30af877f40dae8713fd63d9f51a8493343ef358558b17a321"} Oct 02 11:13:10 crc kubenswrapper[4783]: I1002 11:13:10.979195 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-vndjn" event={"ID":"cd068d52-b7f9-4d38-80bf-941cac6d3b39","Type":"ContainerStarted","Data":"196d8a5580adcab217a1e09d301a322cf702c37b92a614e063409e9422f0956e"} Oct 02 11:13:11 crc kubenswrapper[4783]: I1002 11:13:11.021748 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=37.219493318 podStartE2EDuration="49.021726827s" podCreationTimestamp="2025-10-02 11:12:22 +0000 UTC" firstStartedPulling="2025-10-02 11:12:56.617395679 +0000 UTC m=+1209.933589940" lastFinishedPulling="2025-10-02 11:13:08.419629188 +0000 UTC m=+1221.735823449" observedRunningTime="2025-10-02 11:13:11.012585228 +0000 UTC m=+1224.328779489" watchObservedRunningTime="2025-10-02 11:13:11.021726827 +0000 UTC m=+1224.337921088" Oct 02 11:13:11 crc kubenswrapper[4783]: I1002 11:13:11.317627 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-77585f5f8c-n4xzc"] Oct 02 11:13:11 crc kubenswrapper[4783]: I1002 11:13:11.319588 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-77585f5f8c-n4xzc" Oct 02 11:13:11 crc kubenswrapper[4783]: I1002 11:13:11.321808 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Oct 02 11:13:11 crc kubenswrapper[4783]: I1002 11:13:11.395454 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-77585f5f8c-n4xzc"] Oct 02 11:13:11 crc kubenswrapper[4783]: I1002 11:13:11.411997 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9e9cc646-1006-4d71-8ac6-74eb08755970-dns-svc\") pod \"dnsmasq-dns-77585f5f8c-n4xzc\" (UID: \"9e9cc646-1006-4d71-8ac6-74eb08755970\") " pod="openstack/dnsmasq-dns-77585f5f8c-n4xzc" Oct 02 11:13:11 crc kubenswrapper[4783]: I1002 11:13:11.412066 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9e9cc646-1006-4d71-8ac6-74eb08755970-ovsdbserver-sb\") pod \"dnsmasq-dns-77585f5f8c-n4xzc\" (UID: \"9e9cc646-1006-4d71-8ac6-74eb08755970\") " pod="openstack/dnsmasq-dns-77585f5f8c-n4xzc" Oct 02 11:13:11 crc kubenswrapper[4783]: I1002 11:13:11.412293 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w8tnf\" (UniqueName: \"kubernetes.io/projected/9e9cc646-1006-4d71-8ac6-74eb08755970-kube-api-access-w8tnf\") pod \"dnsmasq-dns-77585f5f8c-n4xzc\" (UID: \"9e9cc646-1006-4d71-8ac6-74eb08755970\") " pod="openstack/dnsmasq-dns-77585f5f8c-n4xzc" Oct 02 11:13:11 crc kubenswrapper[4783]: I1002 11:13:11.412443 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9e9cc646-1006-4d71-8ac6-74eb08755970-ovsdbserver-nb\") pod \"dnsmasq-dns-77585f5f8c-n4xzc\" (UID: \"9e9cc646-1006-4d71-8ac6-74eb08755970\") " pod="openstack/dnsmasq-dns-77585f5f8c-n4xzc" Oct 02 11:13:11 crc kubenswrapper[4783]: I1002 11:13:11.412480 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9e9cc646-1006-4d71-8ac6-74eb08755970-config\") pod \"dnsmasq-dns-77585f5f8c-n4xzc\" (UID: \"9e9cc646-1006-4d71-8ac6-74eb08755970\") " pod="openstack/dnsmasq-dns-77585f5f8c-n4xzc" Oct 02 11:13:11 crc kubenswrapper[4783]: I1002 11:13:11.412600 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9e9cc646-1006-4d71-8ac6-74eb08755970-dns-swift-storage-0\") pod \"dnsmasq-dns-77585f5f8c-n4xzc\" (UID: \"9e9cc646-1006-4d71-8ac6-74eb08755970\") " pod="openstack/dnsmasq-dns-77585f5f8c-n4xzc" Oct 02 11:13:11 crc kubenswrapper[4783]: I1002 11:13:11.514085 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w8tnf\" (UniqueName: \"kubernetes.io/projected/9e9cc646-1006-4d71-8ac6-74eb08755970-kube-api-access-w8tnf\") pod \"dnsmasq-dns-77585f5f8c-n4xzc\" (UID: \"9e9cc646-1006-4d71-8ac6-74eb08755970\") " pod="openstack/dnsmasq-dns-77585f5f8c-n4xzc" Oct 02 11:13:11 crc kubenswrapper[4783]: I1002 11:13:11.514147 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9e9cc646-1006-4d71-8ac6-74eb08755970-ovsdbserver-nb\") pod \"dnsmasq-dns-77585f5f8c-n4xzc\" (UID: 
\"9e9cc646-1006-4d71-8ac6-74eb08755970\") " pod="openstack/dnsmasq-dns-77585f5f8c-n4xzc" Oct 02 11:13:11 crc kubenswrapper[4783]: I1002 11:13:11.514170 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9e9cc646-1006-4d71-8ac6-74eb08755970-config\") pod \"dnsmasq-dns-77585f5f8c-n4xzc\" (UID: \"9e9cc646-1006-4d71-8ac6-74eb08755970\") " pod="openstack/dnsmasq-dns-77585f5f8c-n4xzc" Oct 02 11:13:11 crc kubenswrapper[4783]: I1002 11:13:11.514202 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9e9cc646-1006-4d71-8ac6-74eb08755970-dns-swift-storage-0\") pod \"dnsmasq-dns-77585f5f8c-n4xzc\" (UID: \"9e9cc646-1006-4d71-8ac6-74eb08755970\") " pod="openstack/dnsmasq-dns-77585f5f8c-n4xzc" Oct 02 11:13:11 crc kubenswrapper[4783]: I1002 11:13:11.514238 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9e9cc646-1006-4d71-8ac6-74eb08755970-dns-svc\") pod \"dnsmasq-dns-77585f5f8c-n4xzc\" (UID: \"9e9cc646-1006-4d71-8ac6-74eb08755970\") " pod="openstack/dnsmasq-dns-77585f5f8c-n4xzc" Oct 02 11:13:11 crc kubenswrapper[4783]: I1002 11:13:11.514255 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9e9cc646-1006-4d71-8ac6-74eb08755970-ovsdbserver-sb\") pod \"dnsmasq-dns-77585f5f8c-n4xzc\" (UID: \"9e9cc646-1006-4d71-8ac6-74eb08755970\") " pod="openstack/dnsmasq-dns-77585f5f8c-n4xzc" Oct 02 11:13:11 crc kubenswrapper[4783]: I1002 11:13:11.515076 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9e9cc646-1006-4d71-8ac6-74eb08755970-ovsdbserver-sb\") pod \"dnsmasq-dns-77585f5f8c-n4xzc\" (UID: \"9e9cc646-1006-4d71-8ac6-74eb08755970\") " pod="openstack/dnsmasq-dns-77585f5f8c-n4xzc" Oct 02 11:13:11 crc kubenswrapper[4783]: I1002 11:13:11.515626 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9e9cc646-1006-4d71-8ac6-74eb08755970-dns-swift-storage-0\") pod \"dnsmasq-dns-77585f5f8c-n4xzc\" (UID: \"9e9cc646-1006-4d71-8ac6-74eb08755970\") " pod="openstack/dnsmasq-dns-77585f5f8c-n4xzc" Oct 02 11:13:11 crc kubenswrapper[4783]: I1002 11:13:11.516243 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9e9cc646-1006-4d71-8ac6-74eb08755970-ovsdbserver-nb\") pod \"dnsmasq-dns-77585f5f8c-n4xzc\" (UID: \"9e9cc646-1006-4d71-8ac6-74eb08755970\") " pod="openstack/dnsmasq-dns-77585f5f8c-n4xzc" Oct 02 11:13:11 crc kubenswrapper[4783]: I1002 11:13:11.516247 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9e9cc646-1006-4d71-8ac6-74eb08755970-dns-svc\") pod \"dnsmasq-dns-77585f5f8c-n4xzc\" (UID: \"9e9cc646-1006-4d71-8ac6-74eb08755970\") " pod="openstack/dnsmasq-dns-77585f5f8c-n4xzc" Oct 02 11:13:11 crc kubenswrapper[4783]: I1002 11:13:11.516537 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9e9cc646-1006-4d71-8ac6-74eb08755970-config\") pod \"dnsmasq-dns-77585f5f8c-n4xzc\" (UID: \"9e9cc646-1006-4d71-8ac6-74eb08755970\") " pod="openstack/dnsmasq-dns-77585f5f8c-n4xzc" Oct 02 11:13:11 crc kubenswrapper[4783]: 
I1002 11:13:11.534093 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w8tnf\" (UniqueName: \"kubernetes.io/projected/9e9cc646-1006-4d71-8ac6-74eb08755970-kube-api-access-w8tnf\") pod \"dnsmasq-dns-77585f5f8c-n4xzc\" (UID: \"9e9cc646-1006-4d71-8ac6-74eb08755970\") " pod="openstack/dnsmasq-dns-77585f5f8c-n4xzc" Oct 02 11:13:11 crc kubenswrapper[4783]: I1002 11:13:11.637242 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-77585f5f8c-n4xzc" Oct 02 11:13:11 crc kubenswrapper[4783]: I1002 11:13:11.919145 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-77585f5f8c-n4xzc"] Oct 02 11:13:12 crc kubenswrapper[4783]: I1002 11:13:12.013168 4783 generic.go:334] "Generic (PLEG): container finished" podID="17badb90-21e0-40b3-b28f-15424c38ca7a" containerID="b1b04c86772704658a7a4ed863917efd35af162ad1746fc8c961fed2211dfb77" exitCode=0 Oct 02 11:13:12 crc kubenswrapper[4783]: I1002 11:13:12.013259 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-2btm9" event={"ID":"17badb90-21e0-40b3-b28f-15424c38ca7a","Type":"ContainerDied","Data":"b1b04c86772704658a7a4ed863917efd35af162ad1746fc8c961fed2211dfb77"} Oct 02 11:13:12 crc kubenswrapper[4783]: I1002 11:13:12.016826 4783 generic.go:334] "Generic (PLEG): container finished" podID="cd068d52-b7f9-4d38-80bf-941cac6d3b39" containerID="7069db6b24197a2f8b61e3892bfb81505504fe6a7ca3d8736e6e7c76df8bda29" exitCode=0 Oct 02 11:13:12 crc kubenswrapper[4783]: I1002 11:13:12.016893 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-vndjn" event={"ID":"cd068d52-b7f9-4d38-80bf-941cac6d3b39","Type":"ContainerDied","Data":"7069db6b24197a2f8b61e3892bfb81505504fe6a7ca3d8736e6e7c76df8bda29"} Oct 02 11:13:12 crc kubenswrapper[4783]: I1002 11:13:12.018909 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77585f5f8c-n4xzc" event={"ID":"9e9cc646-1006-4d71-8ac6-74eb08755970","Type":"ContainerStarted","Data":"d7ec9950130f46914c33f1b26b58bf1822b545e92b7fd1862da7e9cfc42f0c5d"} Oct 02 11:13:12 crc kubenswrapper[4783]: I1002 11:13:12.021702 4783 generic.go:334] "Generic (PLEG): container finished" podID="546476d1-d7f2-42d1-8cf0-dff8dbaa5f10" containerID="1000a4525ec9a400caaaa8778c9ec543673e87d9d8e0497e58b90fdc691b7af3" exitCode=0 Oct 02 11:13:12 crc kubenswrapper[4783]: I1002 11:13:12.023052 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-pqd5f" event={"ID":"546476d1-d7f2-42d1-8cf0-dff8dbaa5f10","Type":"ContainerDied","Data":"1000a4525ec9a400caaaa8778c9ec543673e87d9d8e0497e58b90fdc691b7af3"} Oct 02 11:13:12 crc kubenswrapper[4783]: I1002 11:13:12.023076 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-pqd5f" event={"ID":"546476d1-d7f2-42d1-8cf0-dff8dbaa5f10","Type":"ContainerStarted","Data":"a4f39a433384cabc9bd14d5464320259cee5ed3a469d70c209408d037abf4683"} Oct 02 11:13:13 crc kubenswrapper[4783]: I1002 11:13:13.029779 4783 generic.go:334] "Generic (PLEG): container finished" podID="9e9cc646-1006-4d71-8ac6-74eb08755970" containerID="ce7d2f53a05d990cd59dc7d3d4b2f3bf9e3f68b43e4cb648f6c6873d9ed39a0f" exitCode=0 Oct 02 11:13:13 crc kubenswrapper[4783]: I1002 11:13:13.029876 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77585f5f8c-n4xzc" 
event={"ID":"9e9cc646-1006-4d71-8ac6-74eb08755970","Type":"ContainerDied","Data":"ce7d2f53a05d990cd59dc7d3d4b2f3bf9e3f68b43e4cb648f6c6873d9ed39a0f"} Oct 02 11:13:13 crc kubenswrapper[4783]: I1002 11:13:13.358684 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-vndjn" Oct 02 11:13:13 crc kubenswrapper[4783]: I1002 11:13:13.394080 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-2btm9" Oct 02 11:13:13 crc kubenswrapper[4783]: I1002 11:13:13.442802 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hcxzz\" (UniqueName: \"kubernetes.io/projected/cd068d52-b7f9-4d38-80bf-941cac6d3b39-kube-api-access-hcxzz\") pod \"cd068d52-b7f9-4d38-80bf-941cac6d3b39\" (UID: \"cd068d52-b7f9-4d38-80bf-941cac6d3b39\") " Oct 02 11:13:13 crc kubenswrapper[4783]: I1002 11:13:13.446763 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd068d52-b7f9-4d38-80bf-941cac6d3b39-kube-api-access-hcxzz" (OuterVolumeSpecName: "kube-api-access-hcxzz") pod "cd068d52-b7f9-4d38-80bf-941cac6d3b39" (UID: "cd068d52-b7f9-4d38-80bf-941cac6d3b39"). InnerVolumeSpecName "kube-api-access-hcxzz". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:13:13 crc kubenswrapper[4783]: I1002 11:13:13.471481 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-pqd5f" Oct 02 11:13:13 crc kubenswrapper[4783]: I1002 11:13:13.544313 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9snk8\" (UniqueName: \"kubernetes.io/projected/546476d1-d7f2-42d1-8cf0-dff8dbaa5f10-kube-api-access-9snk8\") pod \"546476d1-d7f2-42d1-8cf0-dff8dbaa5f10\" (UID: \"546476d1-d7f2-42d1-8cf0-dff8dbaa5f10\") " Oct 02 11:13:13 crc kubenswrapper[4783]: I1002 11:13:13.544361 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wfmsg\" (UniqueName: \"kubernetes.io/projected/17badb90-21e0-40b3-b28f-15424c38ca7a-kube-api-access-wfmsg\") pod \"17badb90-21e0-40b3-b28f-15424c38ca7a\" (UID: \"17badb90-21e0-40b3-b28f-15424c38ca7a\") " Oct 02 11:13:13 crc kubenswrapper[4783]: I1002 11:13:13.544721 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hcxzz\" (UniqueName: \"kubernetes.io/projected/cd068d52-b7f9-4d38-80bf-941cac6d3b39-kube-api-access-hcxzz\") on node \"crc\" DevicePath \"\"" Oct 02 11:13:13 crc kubenswrapper[4783]: I1002 11:13:13.547960 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/546476d1-d7f2-42d1-8cf0-dff8dbaa5f10-kube-api-access-9snk8" (OuterVolumeSpecName: "kube-api-access-9snk8") pod "546476d1-d7f2-42d1-8cf0-dff8dbaa5f10" (UID: "546476d1-d7f2-42d1-8cf0-dff8dbaa5f10"). InnerVolumeSpecName "kube-api-access-9snk8". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:13:13 crc kubenswrapper[4783]: I1002 11:13:13.548181 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/17badb90-21e0-40b3-b28f-15424c38ca7a-kube-api-access-wfmsg" (OuterVolumeSpecName: "kube-api-access-wfmsg") pod "17badb90-21e0-40b3-b28f-15424c38ca7a" (UID: "17badb90-21e0-40b3-b28f-15424c38ca7a"). InnerVolumeSpecName "kube-api-access-wfmsg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:13:13 crc kubenswrapper[4783]: I1002 11:13:13.646257 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9snk8\" (UniqueName: \"kubernetes.io/projected/546476d1-d7f2-42d1-8cf0-dff8dbaa5f10-kube-api-access-9snk8\") on node \"crc\" DevicePath \"\"" Oct 02 11:13:13 crc kubenswrapper[4783]: I1002 11:13:13.646300 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wfmsg\" (UniqueName: \"kubernetes.io/projected/17badb90-21e0-40b3-b28f-15424c38ca7a-kube-api-access-wfmsg\") on node \"crc\" DevicePath \"\"" Oct 02 11:13:14 crc kubenswrapper[4783]: I1002 11:13:14.044049 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-pqd5f" event={"ID":"546476d1-d7f2-42d1-8cf0-dff8dbaa5f10","Type":"ContainerDied","Data":"a4f39a433384cabc9bd14d5464320259cee5ed3a469d70c209408d037abf4683"} Oct 02 11:13:14 crc kubenswrapper[4783]: I1002 11:13:14.044096 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a4f39a433384cabc9bd14d5464320259cee5ed3a469d70c209408d037abf4683" Oct 02 11:13:14 crc kubenswrapper[4783]: I1002 11:13:14.044064 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-pqd5f" Oct 02 11:13:14 crc kubenswrapper[4783]: I1002 11:13:14.045903 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-2btm9" event={"ID":"17badb90-21e0-40b3-b28f-15424c38ca7a","Type":"ContainerDied","Data":"1ad86d45396a35e30af877f40dae8713fd63d9f51a8493343ef358558b17a321"} Oct 02 11:13:14 crc kubenswrapper[4783]: I1002 11:13:14.045927 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1ad86d45396a35e30af877f40dae8713fd63d9f51a8493343ef358558b17a321" Oct 02 11:13:14 crc kubenswrapper[4783]: I1002 11:13:14.045958 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-2btm9" Oct 02 11:13:14 crc kubenswrapper[4783]: I1002 11:13:14.048131 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-vndjn" Oct 02 11:13:14 crc kubenswrapper[4783]: I1002 11:13:14.048147 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-vndjn" event={"ID":"cd068d52-b7f9-4d38-80bf-941cac6d3b39","Type":"ContainerDied","Data":"196d8a5580adcab217a1e09d301a322cf702c37b92a614e063409e9422f0956e"} Oct 02 11:13:14 crc kubenswrapper[4783]: I1002 11:13:14.048168 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="196d8a5580adcab217a1e09d301a322cf702c37b92a614e063409e9422f0956e" Oct 02 11:13:14 crc kubenswrapper[4783]: I1002 11:13:14.050680 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77585f5f8c-n4xzc" event={"ID":"9e9cc646-1006-4d71-8ac6-74eb08755970","Type":"ContainerStarted","Data":"bdf71aa2917a5242d0327bfa7446400898fd0899cc9d3afdda31d9c052566ea7"} Oct 02 11:13:14 crc kubenswrapper[4783]: I1002 11:13:14.050837 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-77585f5f8c-n4xzc" Oct 02 11:13:14 crc kubenswrapper[4783]: I1002 11:13:14.074448 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-77585f5f8c-n4xzc" podStartSLOduration=3.074380008 podStartE2EDuration="3.074380008s" podCreationTimestamp="2025-10-02 11:13:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:13:14.068783726 +0000 UTC m=+1227.384978027" watchObservedRunningTime="2025-10-02 11:13:14.074380008 +0000 UTC m=+1227.390574309" Oct 02 11:13:16 crc kubenswrapper[4783]: I1002 11:13:16.487305 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-da86-account-create-d4vxn"] Oct 02 11:13:16 crc kubenswrapper[4783]: E1002 11:13:16.488485 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd068d52-b7f9-4d38-80bf-941cac6d3b39" containerName="mariadb-database-create" Oct 02 11:13:16 crc kubenswrapper[4783]: I1002 11:13:16.488522 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd068d52-b7f9-4d38-80bf-941cac6d3b39" containerName="mariadb-database-create" Oct 02 11:13:16 crc kubenswrapper[4783]: E1002 11:13:16.488538 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="546476d1-d7f2-42d1-8cf0-dff8dbaa5f10" containerName="mariadb-database-create" Oct 02 11:13:16 crc kubenswrapper[4783]: I1002 11:13:16.488545 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="546476d1-d7f2-42d1-8cf0-dff8dbaa5f10" containerName="mariadb-database-create" Oct 02 11:13:16 crc kubenswrapper[4783]: E1002 11:13:16.488596 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17badb90-21e0-40b3-b28f-15424c38ca7a" containerName="mariadb-database-create" Oct 02 11:13:16 crc kubenswrapper[4783]: I1002 11:13:16.488605 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="17badb90-21e0-40b3-b28f-15424c38ca7a" containerName="mariadb-database-create" Oct 02 11:13:16 crc kubenswrapper[4783]: I1002 11:13:16.488853 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="546476d1-d7f2-42d1-8cf0-dff8dbaa5f10" containerName="mariadb-database-create" Oct 02 11:13:16 crc kubenswrapper[4783]: I1002 11:13:16.488871 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="17badb90-21e0-40b3-b28f-15424c38ca7a" containerName="mariadb-database-create" Oct 02 11:13:16 crc kubenswrapper[4783]: I1002 11:13:16.488909 4783 
memory_manager.go:354] "RemoveStaleState removing state" podUID="cd068d52-b7f9-4d38-80bf-941cac6d3b39" containerName="mariadb-database-create" Oct 02 11:13:16 crc kubenswrapper[4783]: I1002 11:13:16.490214 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-da86-account-create-d4vxn" Oct 02 11:13:16 crc kubenswrapper[4783]: I1002 11:13:16.491863 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret" Oct 02 11:13:16 crc kubenswrapper[4783]: I1002 11:13:16.504807 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-da86-account-create-d4vxn"] Oct 02 11:13:16 crc kubenswrapper[4783]: I1002 11:13:16.592668 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4f9fz\" (UniqueName: \"kubernetes.io/projected/edb9f3db-8908-4bd6-a86e-9a46247458e7-kube-api-access-4f9fz\") pod \"glance-da86-account-create-d4vxn\" (UID: \"edb9f3db-8908-4bd6-a86e-9a46247458e7\") " pod="openstack/glance-da86-account-create-d4vxn" Oct 02 11:13:16 crc kubenswrapper[4783]: I1002 11:13:16.695196 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4f9fz\" (UniqueName: \"kubernetes.io/projected/edb9f3db-8908-4bd6-a86e-9a46247458e7-kube-api-access-4f9fz\") pod \"glance-da86-account-create-d4vxn\" (UID: \"edb9f3db-8908-4bd6-a86e-9a46247458e7\") " pod="openstack/glance-da86-account-create-d4vxn" Oct 02 11:13:16 crc kubenswrapper[4783]: I1002 11:13:16.721783 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4f9fz\" (UniqueName: \"kubernetes.io/projected/edb9f3db-8908-4bd6-a86e-9a46247458e7-kube-api-access-4f9fz\") pod \"glance-da86-account-create-d4vxn\" (UID: \"edb9f3db-8908-4bd6-a86e-9a46247458e7\") " pod="openstack/glance-da86-account-create-d4vxn" Oct 02 11:13:16 crc kubenswrapper[4783]: I1002 11:13:16.813676 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-da86-account-create-d4vxn" Oct 02 11:13:17 crc kubenswrapper[4783]: I1002 11:13:17.063298 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-da86-account-create-d4vxn"] Oct 02 11:13:17 crc kubenswrapper[4783]: W1002 11:13:17.068367 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podedb9f3db_8908_4bd6_a86e_9a46247458e7.slice/crio-1272c07bc97b69d3b3808f4e6ce3ee663015cb8851b4f674bd82b50842ea9f67 WatchSource:0}: Error finding container 1272c07bc97b69d3b3808f4e6ce3ee663015cb8851b4f674bd82b50842ea9f67: Status 404 returned error can't find the container with id 1272c07bc97b69d3b3808f4e6ce3ee663015cb8851b4f674bd82b50842ea9f67 Oct 02 11:13:17 crc kubenswrapper[4783]: I1002 11:13:17.079062 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-da86-account-create-d4vxn" event={"ID":"edb9f3db-8908-4bd6-a86e-9a46247458e7","Type":"ContainerStarted","Data":"1272c07bc97b69d3b3808f4e6ce3ee663015cb8851b4f674bd82b50842ea9f67"} Oct 02 11:13:18 crc kubenswrapper[4783]: I1002 11:13:18.090685 4783 generic.go:334] "Generic (PLEG): container finished" podID="edb9f3db-8908-4bd6-a86e-9a46247458e7" containerID="e639993a15b5efac2876adbb33ea5e5cbb75aa4608deed6fcc8804e26a746f4c" exitCode=0 Oct 02 11:13:18 crc kubenswrapper[4783]: I1002 11:13:18.090764 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-da86-account-create-d4vxn" event={"ID":"edb9f3db-8908-4bd6-a86e-9a46247458e7","Type":"ContainerDied","Data":"e639993a15b5efac2876adbb33ea5e5cbb75aa4608deed6fcc8804e26a746f4c"} Oct 02 11:13:19 crc kubenswrapper[4783]: I1002 11:13:19.456631 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-da86-account-create-d4vxn" Oct 02 11:13:19 crc kubenswrapper[4783]: I1002 11:13:19.540958 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4f9fz\" (UniqueName: \"kubernetes.io/projected/edb9f3db-8908-4bd6-a86e-9a46247458e7-kube-api-access-4f9fz\") pod \"edb9f3db-8908-4bd6-a86e-9a46247458e7\" (UID: \"edb9f3db-8908-4bd6-a86e-9a46247458e7\") " Oct 02 11:13:19 crc kubenswrapper[4783]: I1002 11:13:19.546394 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/edb9f3db-8908-4bd6-a86e-9a46247458e7-kube-api-access-4f9fz" (OuterVolumeSpecName: "kube-api-access-4f9fz") pod "edb9f3db-8908-4bd6-a86e-9a46247458e7" (UID: "edb9f3db-8908-4bd6-a86e-9a46247458e7"). InnerVolumeSpecName "kube-api-access-4f9fz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:13:19 crc kubenswrapper[4783]: I1002 11:13:19.643434 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4f9fz\" (UniqueName: \"kubernetes.io/projected/edb9f3db-8908-4bd6-a86e-9a46247458e7-kube-api-access-4f9fz\") on node \"crc\" DevicePath \"\"" Oct 02 11:13:19 crc kubenswrapper[4783]: I1002 11:13:19.936126 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-98ca-account-create-l5h9r"] Oct 02 11:13:19 crc kubenswrapper[4783]: E1002 11:13:19.936637 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="edb9f3db-8908-4bd6-a86e-9a46247458e7" containerName="mariadb-account-create" Oct 02 11:13:19 crc kubenswrapper[4783]: I1002 11:13:19.936657 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="edb9f3db-8908-4bd6-a86e-9a46247458e7" containerName="mariadb-account-create" Oct 02 11:13:19 crc kubenswrapper[4783]: I1002 11:13:19.936855 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="edb9f3db-8908-4bd6-a86e-9a46247458e7" containerName="mariadb-account-create" Oct 02 11:13:19 crc kubenswrapper[4783]: I1002 11:13:19.937451 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-98ca-account-create-l5h9r" Oct 02 11:13:19 crc kubenswrapper[4783]: I1002 11:13:19.940303 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Oct 02 11:13:19 crc kubenswrapper[4783]: I1002 11:13:19.948378 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-98ca-account-create-l5h9r"] Oct 02 11:13:20 crc kubenswrapper[4783]: I1002 11:13:20.050233 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bxccw\" (UniqueName: \"kubernetes.io/projected/df261fe1-316c-4f6c-828f-b0668ed6c1ee-kube-api-access-bxccw\") pod \"cinder-98ca-account-create-l5h9r\" (UID: \"df261fe1-316c-4f6c-828f-b0668ed6c1ee\") " pod="openstack/cinder-98ca-account-create-l5h9r" Oct 02 11:13:20 crc kubenswrapper[4783]: I1002 11:13:20.111241 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-da86-account-create-d4vxn" event={"ID":"edb9f3db-8908-4bd6-a86e-9a46247458e7","Type":"ContainerDied","Data":"1272c07bc97b69d3b3808f4e6ce3ee663015cb8851b4f674bd82b50842ea9f67"} Oct 02 11:13:20 crc kubenswrapper[4783]: I1002 11:13:20.111518 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1272c07bc97b69d3b3808f4e6ce3ee663015cb8851b4f674bd82b50842ea9f67" Oct 02 11:13:20 crc kubenswrapper[4783]: I1002 11:13:20.111723 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-da86-account-create-d4vxn" Oct 02 11:13:20 crc kubenswrapper[4783]: I1002 11:13:20.131333 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-a166-account-create-h6f94"] Oct 02 11:13:20 crc kubenswrapper[4783]: I1002 11:13:20.132834 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-a166-account-create-h6f94" Oct 02 11:13:20 crc kubenswrapper[4783]: I1002 11:13:20.136304 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Oct 02 11:13:20 crc kubenswrapper[4783]: I1002 11:13:20.152275 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bxccw\" (UniqueName: \"kubernetes.io/projected/df261fe1-316c-4f6c-828f-b0668ed6c1ee-kube-api-access-bxccw\") pod \"cinder-98ca-account-create-l5h9r\" (UID: \"df261fe1-316c-4f6c-828f-b0668ed6c1ee\") " pod="openstack/cinder-98ca-account-create-l5h9r" Oct 02 11:13:20 crc kubenswrapper[4783]: I1002 11:13:20.154255 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-a166-account-create-h6f94"] Oct 02 11:13:20 crc kubenswrapper[4783]: I1002 11:13:20.175493 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bxccw\" (UniqueName: \"kubernetes.io/projected/df261fe1-316c-4f6c-828f-b0668ed6c1ee-kube-api-access-bxccw\") pod \"cinder-98ca-account-create-l5h9r\" (UID: \"df261fe1-316c-4f6c-828f-b0668ed6c1ee\") " pod="openstack/cinder-98ca-account-create-l5h9r" Oct 02 11:13:20 crc kubenswrapper[4783]: I1002 11:13:20.254093 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kxbjg\" (UniqueName: \"kubernetes.io/projected/4490b7d0-ae49-4417-b778-2050301901bc-kube-api-access-kxbjg\") pod \"barbican-a166-account-create-h6f94\" (UID: \"4490b7d0-ae49-4417-b778-2050301901bc\") " pod="openstack/barbican-a166-account-create-h6f94" Oct 02 11:13:20 crc kubenswrapper[4783]: I1002 11:13:20.268603 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-98ca-account-create-l5h9r" Oct 02 11:13:20 crc kubenswrapper[4783]: I1002 11:13:20.327243 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-a382-account-create-2ccl8"] Oct 02 11:13:20 crc kubenswrapper[4783]: I1002 11:13:20.329360 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-a382-account-create-2ccl8" Oct 02 11:13:20 crc kubenswrapper[4783]: I1002 11:13:20.331554 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Oct 02 11:13:20 crc kubenswrapper[4783]: I1002 11:13:20.352026 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-a382-account-create-2ccl8"] Oct 02 11:13:20 crc kubenswrapper[4783]: I1002 11:13:20.355195 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kxbjg\" (UniqueName: \"kubernetes.io/projected/4490b7d0-ae49-4417-b778-2050301901bc-kube-api-access-kxbjg\") pod \"barbican-a166-account-create-h6f94\" (UID: \"4490b7d0-ae49-4417-b778-2050301901bc\") " pod="openstack/barbican-a166-account-create-h6f94" Oct 02 11:13:20 crc kubenswrapper[4783]: I1002 11:13:20.374403 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kxbjg\" (UniqueName: \"kubernetes.io/projected/4490b7d0-ae49-4417-b778-2050301901bc-kube-api-access-kxbjg\") pod \"barbican-a166-account-create-h6f94\" (UID: \"4490b7d0-ae49-4417-b778-2050301901bc\") " pod="openstack/barbican-a166-account-create-h6f94" Oct 02 11:13:20 crc kubenswrapper[4783]: I1002 11:13:20.453006 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-a166-account-create-h6f94" Oct 02 11:13:20 crc kubenswrapper[4783]: I1002 11:13:20.456325 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rxm6p\" (UniqueName: \"kubernetes.io/projected/7600bb43-28bc-4d04-9fb5-6dcef8b0841f-kube-api-access-rxm6p\") pod \"neutron-a382-account-create-2ccl8\" (UID: \"7600bb43-28bc-4d04-9fb5-6dcef8b0841f\") " pod="openstack/neutron-a382-account-create-2ccl8" Oct 02 11:13:20 crc kubenswrapper[4783]: I1002 11:13:20.562034 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rxm6p\" (UniqueName: \"kubernetes.io/projected/7600bb43-28bc-4d04-9fb5-6dcef8b0841f-kube-api-access-rxm6p\") pod \"neutron-a382-account-create-2ccl8\" (UID: \"7600bb43-28bc-4d04-9fb5-6dcef8b0841f\") " pod="openstack/neutron-a382-account-create-2ccl8" Oct 02 11:13:20 crc kubenswrapper[4783]: I1002 11:13:20.585235 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rxm6p\" (UniqueName: \"kubernetes.io/projected/7600bb43-28bc-4d04-9fb5-6dcef8b0841f-kube-api-access-rxm6p\") pod \"neutron-a382-account-create-2ccl8\" (UID: \"7600bb43-28bc-4d04-9fb5-6dcef8b0841f\") " pod="openstack/neutron-a382-account-create-2ccl8" Oct 02 11:13:20 crc kubenswrapper[4783]: I1002 11:13:20.648036 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-a382-account-create-2ccl8" Oct 02 11:13:20 crc kubenswrapper[4783]: I1002 11:13:20.717731 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-3b79-account-create-pkl5b"] Oct 02 11:13:20 crc kubenswrapper[4783]: I1002 11:13:20.718799 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-3b79-account-create-pkl5b" Oct 02 11:13:20 crc kubenswrapper[4783]: I1002 11:13:20.723749 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Oct 02 11:13:20 crc kubenswrapper[4783]: I1002 11:13:20.740596 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-3b79-account-create-pkl5b"] Oct 02 11:13:20 crc kubenswrapper[4783]: W1002 11:13:20.748718 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddf261fe1_316c_4f6c_828f_b0668ed6c1ee.slice/crio-a87a4506f6684bef7d54ee5c6e29546ac48b5c63c651db3d37d9ddee53f2c283 WatchSource:0}: Error finding container a87a4506f6684bef7d54ee5c6e29546ac48b5c63c651db3d37d9ddee53f2c283: Status 404 returned error can't find the container with id a87a4506f6684bef7d54ee5c6e29546ac48b5c63c651db3d37d9ddee53f2c283 Oct 02 11:13:20 crc kubenswrapper[4783]: I1002 11:13:20.753880 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-98ca-account-create-l5h9r"] Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:20.866771 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lq8cb\" (UniqueName: \"kubernetes.io/projected/1d0e3dc9-58c4-489e-aef0-462f7ba245ca-kube-api-access-lq8cb\") pod \"keystone-3b79-account-create-pkl5b\" (UID: \"1d0e3dc9-58c4-489e-aef0-462f7ba245ca\") " pod="openstack/keystone-3b79-account-create-pkl5b" Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:20.899872 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-fea5-account-create-hh5rg"] Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:20.901900 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-fea5-account-create-hh5rg" Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:20.905167 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:20.924034 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-a166-account-create-h6f94"] Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:20.937568 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-fea5-account-create-hh5rg"] Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:20.968492 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-stl2n\" (UniqueName: \"kubernetes.io/projected/f366b995-e9d1-4ddf-98a9-3278480dca51-kube-api-access-stl2n\") pod \"placement-fea5-account-create-hh5rg\" (UID: \"f366b995-e9d1-4ddf-98a9-3278480dca51\") " pod="openstack/placement-fea5-account-create-hh5rg" Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:20.968539 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lq8cb\" (UniqueName: \"kubernetes.io/projected/1d0e3dc9-58c4-489e-aef0-462f7ba245ca-kube-api-access-lq8cb\") pod \"keystone-3b79-account-create-pkl5b\" (UID: \"1d0e3dc9-58c4-489e-aef0-462f7ba245ca\") " pod="openstack/keystone-3b79-account-create-pkl5b" Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:20.984137 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lq8cb\" (UniqueName: \"kubernetes.io/projected/1d0e3dc9-58c4-489e-aef0-462f7ba245ca-kube-api-access-lq8cb\") pod \"keystone-3b79-account-create-pkl5b\" (UID: \"1d0e3dc9-58c4-489e-aef0-462f7ba245ca\") " pod="openstack/keystone-3b79-account-create-pkl5b" Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:21.045101 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-3b79-account-create-pkl5b" Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:21.070561 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-stl2n\" (UniqueName: \"kubernetes.io/projected/f366b995-e9d1-4ddf-98a9-3278480dca51-kube-api-access-stl2n\") pod \"placement-fea5-account-create-hh5rg\" (UID: \"f366b995-e9d1-4ddf-98a9-3278480dca51\") " pod="openstack/placement-fea5-account-create-hh5rg" Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:21.090685 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-stl2n\" (UniqueName: \"kubernetes.io/projected/f366b995-e9d1-4ddf-98a9-3278480dca51-kube-api-access-stl2n\") pod \"placement-fea5-account-create-hh5rg\" (UID: \"f366b995-e9d1-4ddf-98a9-3278480dca51\") " pod="openstack/placement-fea5-account-create-hh5rg" Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:21.130251 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-a166-account-create-h6f94" event={"ID":"4490b7d0-ae49-4417-b778-2050301901bc","Type":"ContainerStarted","Data":"c985f18fd02cf68faa2960cc2a54ba0508d2389594bed7ef3a487f4609d2f798"} Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:21.132348 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-98ca-account-create-l5h9r" event={"ID":"df261fe1-316c-4f6c-828f-b0668ed6c1ee","Type":"ContainerStarted","Data":"37db03a9a688a2f5a8f4865484a0f4983d591e8228ca6164ceed8236d79d3189"} Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:21.132366 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-98ca-account-create-l5h9r" event={"ID":"df261fe1-316c-4f6c-828f-b0668ed6c1ee","Type":"ContainerStarted","Data":"a87a4506f6684bef7d54ee5c6e29546ac48b5c63c651db3d37d9ddee53f2c283"} Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:21.217177 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-fea5-account-create-hh5rg" Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:21.638583 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-77585f5f8c-n4xzc" Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:21.698322 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-698758b865-dj9rv"] Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:21.698683 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-698758b865-dj9rv" podUID="965b24b4-4f8c-464b-a1ef-a51077d6c553" containerName="dnsmasq-dns" containerID="cri-o://4779511c664e628477f2484c1056f3e0dc0884f821699adf6e92e5a3f491c0ee" gracePeriod=10 Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:21.799796 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-jq2kf"] Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:21.801110 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-jq2kf" Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:21.803800 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:21.804079 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-5lt5x" Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:21.816461 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-jq2kf"] Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:21.889527 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aea4b3d6-8814-424e-a0b0-2748b63f0bfd-config-data\") pod \"glance-db-sync-jq2kf\" (UID: \"aea4b3d6-8814-424e-a0b0-2748b63f0bfd\") " pod="openstack/glance-db-sync-jq2kf" Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:21.889607 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qbhlz\" (UniqueName: \"kubernetes.io/projected/aea4b3d6-8814-424e-a0b0-2748b63f0bfd-kube-api-access-qbhlz\") pod \"glance-db-sync-jq2kf\" (UID: \"aea4b3d6-8814-424e-a0b0-2748b63f0bfd\") " pod="openstack/glance-db-sync-jq2kf" Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:21.889744 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/aea4b3d6-8814-424e-a0b0-2748b63f0bfd-db-sync-config-data\") pod \"glance-db-sync-jq2kf\" (UID: \"aea4b3d6-8814-424e-a0b0-2748b63f0bfd\") " pod="openstack/glance-db-sync-jq2kf" Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:21.889796 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aea4b3d6-8814-424e-a0b0-2748b63f0bfd-combined-ca-bundle\") pod \"glance-db-sync-jq2kf\" (UID: \"aea4b3d6-8814-424e-a0b0-2748b63f0bfd\") " pod="openstack/glance-db-sync-jq2kf" Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:21.991221 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qbhlz\" (UniqueName: \"kubernetes.io/projected/aea4b3d6-8814-424e-a0b0-2748b63f0bfd-kube-api-access-qbhlz\") pod \"glance-db-sync-jq2kf\" (UID: \"aea4b3d6-8814-424e-a0b0-2748b63f0bfd\") " pod="openstack/glance-db-sync-jq2kf" Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:21.991312 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/aea4b3d6-8814-424e-a0b0-2748b63f0bfd-db-sync-config-data\") pod \"glance-db-sync-jq2kf\" (UID: \"aea4b3d6-8814-424e-a0b0-2748b63f0bfd\") " pod="openstack/glance-db-sync-jq2kf" Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:21.991343 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aea4b3d6-8814-424e-a0b0-2748b63f0bfd-combined-ca-bundle\") pod \"glance-db-sync-jq2kf\" (UID: \"aea4b3d6-8814-424e-a0b0-2748b63f0bfd\") " pod="openstack/glance-db-sync-jq2kf" Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:21.991460 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aea4b3d6-8814-424e-a0b0-2748b63f0bfd-config-data\") pod 
\"glance-db-sync-jq2kf\" (UID: \"aea4b3d6-8814-424e-a0b0-2748b63f0bfd\") " pod="openstack/glance-db-sync-jq2kf" Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:21.997238 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aea4b3d6-8814-424e-a0b0-2748b63f0bfd-config-data\") pod \"glance-db-sync-jq2kf\" (UID: \"aea4b3d6-8814-424e-a0b0-2748b63f0bfd\") " pod="openstack/glance-db-sync-jq2kf" Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:21.998796 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aea4b3d6-8814-424e-a0b0-2748b63f0bfd-combined-ca-bundle\") pod \"glance-db-sync-jq2kf\" (UID: \"aea4b3d6-8814-424e-a0b0-2748b63f0bfd\") " pod="openstack/glance-db-sync-jq2kf" Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:21.999806 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/aea4b3d6-8814-424e-a0b0-2748b63f0bfd-db-sync-config-data\") pod \"glance-db-sync-jq2kf\" (UID: \"aea4b3d6-8814-424e-a0b0-2748b63f0bfd\") " pod="openstack/glance-db-sync-jq2kf" Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:22.014811 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qbhlz\" (UniqueName: \"kubernetes.io/projected/aea4b3d6-8814-424e-a0b0-2748b63f0bfd-kube-api-access-qbhlz\") pod \"glance-db-sync-jq2kf\" (UID: \"aea4b3d6-8814-424e-a0b0-2748b63f0bfd\") " pod="openstack/glance-db-sync-jq2kf" Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:22.154089 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-jq2kf" Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:22.176972 4783 generic.go:334] "Generic (PLEG): container finished" podID="965b24b4-4f8c-464b-a1ef-a51077d6c553" containerID="4779511c664e628477f2484c1056f3e0dc0884f821699adf6e92e5a3f491c0ee" exitCode=0 Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:22.177026 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-dj9rv" event={"ID":"965b24b4-4f8c-464b-a1ef-a51077d6c553","Type":"ContainerDied","Data":"4779511c664e628477f2484c1056f3e0dc0884f821699adf6e92e5a3f491c0ee"} Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:22.182740 4783 generic.go:334] "Generic (PLEG): container finished" podID="df261fe1-316c-4f6c-828f-b0668ed6c1ee" containerID="37db03a9a688a2f5a8f4865484a0f4983d591e8228ca6164ceed8236d79d3189" exitCode=0 Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:22.182800 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-98ca-account-create-l5h9r" event={"ID":"df261fe1-316c-4f6c-828f-b0668ed6c1ee","Type":"ContainerDied","Data":"37db03a9a688a2f5a8f4865484a0f4983d591e8228ca6164ceed8236d79d3189"} Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:22.184272 4783 generic.go:334] "Generic (PLEG): container finished" podID="4490b7d0-ae49-4417-b778-2050301901bc" containerID="fd8ad7d0041a27208a3f301f3b2bbab6b46af9b32b0db4d7d0ad2921efd4cb6c" exitCode=0 Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:22.184302 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-a166-account-create-h6f94" event={"ID":"4490b7d0-ae49-4417-b778-2050301901bc","Type":"ContainerDied","Data":"fd8ad7d0041a27208a3f301f3b2bbab6b46af9b32b0db4d7d0ad2921efd4cb6c"} Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:23.048988 4783 
prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-698758b865-dj9rv" podUID="965b24b4-4f8c-464b-a1ef-a51077d6c553" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.111:5353: connect: connection refused" Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:23.723568 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-98ca-account-create-l5h9r" Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:23.730177 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-a166-account-create-h6f94" Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:23.742790 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-dj9rv" Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:23.822489 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/965b24b4-4f8c-464b-a1ef-a51077d6c553-dns-svc\") pod \"965b24b4-4f8c-464b-a1ef-a51077d6c553\" (UID: \"965b24b4-4f8c-464b-a1ef-a51077d6c553\") " Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:23.822548 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/965b24b4-4f8c-464b-a1ef-a51077d6c553-ovsdbserver-sb\") pod \"965b24b4-4f8c-464b-a1ef-a51077d6c553\" (UID: \"965b24b4-4f8c-464b-a1ef-a51077d6c553\") " Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:23.822629 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/965b24b4-4f8c-464b-a1ef-a51077d6c553-ovsdbserver-nb\") pod \"965b24b4-4f8c-464b-a1ef-a51077d6c553\" (UID: \"965b24b4-4f8c-464b-a1ef-a51077d6c553\") " Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:23.822718 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kxbjg\" (UniqueName: \"kubernetes.io/projected/4490b7d0-ae49-4417-b778-2050301901bc-kube-api-access-kxbjg\") pod \"4490b7d0-ae49-4417-b778-2050301901bc\" (UID: \"4490b7d0-ae49-4417-b778-2050301901bc\") " Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:23.822754 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/965b24b4-4f8c-464b-a1ef-a51077d6c553-config\") pod \"965b24b4-4f8c-464b-a1ef-a51077d6c553\" (UID: \"965b24b4-4f8c-464b-a1ef-a51077d6c553\") " Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:23.822790 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bxccw\" (UniqueName: \"kubernetes.io/projected/df261fe1-316c-4f6c-828f-b0668ed6c1ee-kube-api-access-bxccw\") pod \"df261fe1-316c-4f6c-828f-b0668ed6c1ee\" (UID: \"df261fe1-316c-4f6c-828f-b0668ed6c1ee\") " Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:23.822931 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8r8wf\" (UniqueName: \"kubernetes.io/projected/965b24b4-4f8c-464b-a1ef-a51077d6c553-kube-api-access-8r8wf\") pod \"965b24b4-4f8c-464b-a1ef-a51077d6c553\" (UID: \"965b24b4-4f8c-464b-a1ef-a51077d6c553\") " Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:23.839568 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/965b24b4-4f8c-464b-a1ef-a51077d6c553-kube-api-access-8r8wf" 
(OuterVolumeSpecName: "kube-api-access-8r8wf") pod "965b24b4-4f8c-464b-a1ef-a51077d6c553" (UID: "965b24b4-4f8c-464b-a1ef-a51077d6c553"). InnerVolumeSpecName "kube-api-access-8r8wf". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:23.839655 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4490b7d0-ae49-4417-b778-2050301901bc-kube-api-access-kxbjg" (OuterVolumeSpecName: "kube-api-access-kxbjg") pod "4490b7d0-ae49-4417-b778-2050301901bc" (UID: "4490b7d0-ae49-4417-b778-2050301901bc"). InnerVolumeSpecName "kube-api-access-kxbjg". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:23.861712 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/df261fe1-316c-4f6c-828f-b0668ed6c1ee-kube-api-access-bxccw" (OuterVolumeSpecName: "kube-api-access-bxccw") pod "df261fe1-316c-4f6c-828f-b0668ed6c1ee" (UID: "df261fe1-316c-4f6c-828f-b0668ed6c1ee"). InnerVolumeSpecName "kube-api-access-bxccw". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:23.879207 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/965b24b4-4f8c-464b-a1ef-a51077d6c553-config" (OuterVolumeSpecName: "config") pod "965b24b4-4f8c-464b-a1ef-a51077d6c553" (UID: "965b24b4-4f8c-464b-a1ef-a51077d6c553"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:23.884552 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/965b24b4-4f8c-464b-a1ef-a51077d6c553-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "965b24b4-4f8c-464b-a1ef-a51077d6c553" (UID: "965b24b4-4f8c-464b-a1ef-a51077d6c553"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:23.898750 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/965b24b4-4f8c-464b-a1ef-a51077d6c553-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "965b24b4-4f8c-464b-a1ef-a51077d6c553" (UID: "965b24b4-4f8c-464b-a1ef-a51077d6c553"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:23.904436 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/965b24b4-4f8c-464b-a1ef-a51077d6c553-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "965b24b4-4f8c-464b-a1ef-a51077d6c553" (UID: "965b24b4-4f8c-464b-a1ef-a51077d6c553"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:23.925038 4783 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/965b24b4-4f8c-464b-a1ef-a51077d6c553-config\") on node \"crc\" DevicePath \"\"" Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:23.925074 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bxccw\" (UniqueName: \"kubernetes.io/projected/df261fe1-316c-4f6c-828f-b0668ed6c1ee-kube-api-access-bxccw\") on node \"crc\" DevicePath \"\"" Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:23.925088 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8r8wf\" (UniqueName: \"kubernetes.io/projected/965b24b4-4f8c-464b-a1ef-a51077d6c553-kube-api-access-8r8wf\") on node \"crc\" DevicePath \"\"" Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:23.925102 4783 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/965b24b4-4f8c-464b-a1ef-a51077d6c553-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:23.925114 4783 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/965b24b4-4f8c-464b-a1ef-a51077d6c553-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:23.925124 4783 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/965b24b4-4f8c-464b-a1ef-a51077d6c553-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:23.925134 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kxbjg\" (UniqueName: \"kubernetes.io/projected/4490b7d0-ae49-4417-b778-2050301901bc-kube-api-access-kxbjg\") on node \"crc\" DevicePath \"\"" Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:23.968452 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-3b79-account-create-pkl5b"] Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:23.974559 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-a382-account-create-2ccl8"] Oct 02 11:13:23 crc kubenswrapper[4783]: I1002 11:13:23.980373 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-fea5-account-create-hh5rg"] Oct 02 11:13:23 crc kubenswrapper[4783]: W1002 11:13:23.982108 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7600bb43_28bc_4d04_9fb5_6dcef8b0841f.slice/crio-6c3a5dd2373ab92b3d3254b1ec32c41c9d6fb4173a77c30032858852d53671a2 WatchSource:0}: Error finding container 6c3a5dd2373ab92b3d3254b1ec32c41c9d6fb4173a77c30032858852d53671a2: Status 404 returned error can't find the container with id 6c3a5dd2373ab92b3d3254b1ec32c41c9d6fb4173a77c30032858852d53671a2 Oct 02 11:13:24 crc kubenswrapper[4783]: I1002 11:13:24.096728 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-jq2kf"] Oct 02 11:13:24 crc kubenswrapper[4783]: I1002 11:13:24.226110 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-698758b865-dj9rv" event={"ID":"965b24b4-4f8c-464b-a1ef-a51077d6c553","Type":"ContainerDied","Data":"af83fd00e6d6e5e2cf3e7335f46ac6cbeb5951f51e8d6ccd788e017a396ff9a1"} Oct 02 11:13:24 crc kubenswrapper[4783]: I1002 11:13:24.226168 4783 util.go:48] "No ready 
sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-698758b865-dj9rv" Oct 02 11:13:24 crc kubenswrapper[4783]: I1002 11:13:24.226574 4783 scope.go:117] "RemoveContainer" containerID="4779511c664e628477f2484c1056f3e0dc0884f821699adf6e92e5a3f491c0ee" Oct 02 11:13:24 crc kubenswrapper[4783]: I1002 11:13:24.231975 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-98ca-account-create-l5h9r" Oct 02 11:13:24 crc kubenswrapper[4783]: I1002 11:13:24.231993 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-98ca-account-create-l5h9r" event={"ID":"df261fe1-316c-4f6c-828f-b0668ed6c1ee","Type":"ContainerDied","Data":"a87a4506f6684bef7d54ee5c6e29546ac48b5c63c651db3d37d9ddee53f2c283"} Oct 02 11:13:24 crc kubenswrapper[4783]: I1002 11:13:24.233551 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a87a4506f6684bef7d54ee5c6e29546ac48b5c63c651db3d37d9ddee53f2c283" Oct 02 11:13:24 crc kubenswrapper[4783]: I1002 11:13:24.235357 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-3b79-account-create-pkl5b" event={"ID":"1d0e3dc9-58c4-489e-aef0-462f7ba245ca","Type":"ContainerStarted","Data":"f39f9ce1a43ab942f8b8e683aec940007dbe4c3c8af05b5b4cc7893576c0f498"} Oct 02 11:13:24 crc kubenswrapper[4783]: I1002 11:13:24.239389 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-a166-account-create-h6f94" Oct 02 11:13:24 crc kubenswrapper[4783]: I1002 11:13:24.239404 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-a166-account-create-h6f94" event={"ID":"4490b7d0-ae49-4417-b778-2050301901bc","Type":"ContainerDied","Data":"c985f18fd02cf68faa2960cc2a54ba0508d2389594bed7ef3a487f4609d2f798"} Oct 02 11:13:24 crc kubenswrapper[4783]: I1002 11:13:24.239516 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c985f18fd02cf68faa2960cc2a54ba0508d2389594bed7ef3a487f4609d2f798" Oct 02 11:13:24 crc kubenswrapper[4783]: I1002 11:13:24.243110 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-jq2kf" event={"ID":"aea4b3d6-8814-424e-a0b0-2748b63f0bfd","Type":"ContainerStarted","Data":"a6f8a68ae861f6d02b46847f1ec0ea627e72f3e0c879eeab3bda06aaf2dcc00c"} Oct 02 11:13:24 crc kubenswrapper[4783]: I1002 11:13:24.250330 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-a382-account-create-2ccl8" event={"ID":"7600bb43-28bc-4d04-9fb5-6dcef8b0841f","Type":"ContainerStarted","Data":"6c3a5dd2373ab92b3d3254b1ec32c41c9d6fb4173a77c30032858852d53671a2"} Oct 02 11:13:24 crc kubenswrapper[4783]: I1002 11:13:24.253860 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-fea5-account-create-hh5rg" event={"ID":"f366b995-e9d1-4ddf-98a9-3278480dca51","Type":"ContainerStarted","Data":"4b90c4c9e591e3a4695f020060e7b92b3b4b2585a9f6c64753ad2b7c26ad650c"} Oct 02 11:13:24 crc kubenswrapper[4783]: I1002 11:13:24.256620 4783 scope.go:117] "RemoveContainer" containerID="a491ac10e653f3630e7b804cb28e021907cc6f48e19506894e4e1729c6c15ec7" Oct 02 11:13:24 crc kubenswrapper[4783]: I1002 11:13:24.280243 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-698758b865-dj9rv"] Oct 02 11:13:24 crc kubenswrapper[4783]: I1002 11:13:24.287824 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-698758b865-dj9rv"] Oct 02 11:13:24 crc 
kubenswrapper[4783]: E1002 11:13:24.800068 4783 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7600bb43_28bc_4d04_9fb5_6dcef8b0841f.slice/crio-efde3595061895401dfcee0fad6ea588c8de561210a6305942021a2175e68bb1.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7600bb43_28bc_4d04_9fb5_6dcef8b0841f.slice/crio-conmon-efde3595061895401dfcee0fad6ea588c8de561210a6305942021a2175e68bb1.scope\": RecentStats: unable to find data in memory cache]" Oct 02 11:13:25 crc kubenswrapper[4783]: I1002 11:13:25.264113 4783 generic.go:334] "Generic (PLEG): container finished" podID="f366b995-e9d1-4ddf-98a9-3278480dca51" containerID="56e154c943c21f19bf8e2daf9bca77372df526985a61171ab96ef43dcd649bd9" exitCode=0 Oct 02 11:13:25 crc kubenswrapper[4783]: I1002 11:13:25.264225 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-fea5-account-create-hh5rg" event={"ID":"f366b995-e9d1-4ddf-98a9-3278480dca51","Type":"ContainerDied","Data":"56e154c943c21f19bf8e2daf9bca77372df526985a61171ab96ef43dcd649bd9"} Oct 02 11:13:25 crc kubenswrapper[4783]: I1002 11:13:25.270000 4783 generic.go:334] "Generic (PLEG): container finished" podID="1d0e3dc9-58c4-489e-aef0-462f7ba245ca" containerID="d90f99d0a30bf0b28248e4132694e61993f4f68d092135e789aa4df5612d61a3" exitCode=0 Oct 02 11:13:25 crc kubenswrapper[4783]: I1002 11:13:25.270062 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-3b79-account-create-pkl5b" event={"ID":"1d0e3dc9-58c4-489e-aef0-462f7ba245ca","Type":"ContainerDied","Data":"d90f99d0a30bf0b28248e4132694e61993f4f68d092135e789aa4df5612d61a3"} Oct 02 11:13:25 crc kubenswrapper[4783]: I1002 11:13:25.272149 4783 generic.go:334] "Generic (PLEG): container finished" podID="7600bb43-28bc-4d04-9fb5-6dcef8b0841f" containerID="efde3595061895401dfcee0fad6ea588c8de561210a6305942021a2175e68bb1" exitCode=0 Oct 02 11:13:25 crc kubenswrapper[4783]: I1002 11:13:25.272182 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-a382-account-create-2ccl8" event={"ID":"7600bb43-28bc-4d04-9fb5-6dcef8b0841f","Type":"ContainerDied","Data":"efde3595061895401dfcee0fad6ea588c8de561210a6305942021a2175e68bb1"} Oct 02 11:13:25 crc kubenswrapper[4783]: I1002 11:13:25.557326 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="965b24b4-4f8c-464b-a1ef-a51077d6c553" path="/var/lib/kubelet/pods/965b24b4-4f8c-464b-a1ef-a51077d6c553/volumes" Oct 02 11:13:26 crc kubenswrapper[4783]: I1002 11:13:26.674625 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-a382-account-create-2ccl8" Oct 02 11:13:26 crc kubenswrapper[4783]: I1002 11:13:26.682196 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-3b79-account-create-pkl5b" Oct 02 11:13:26 crc kubenswrapper[4783]: I1002 11:13:26.714512 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-fea5-account-create-hh5rg" Oct 02 11:13:26 crc kubenswrapper[4783]: I1002 11:13:26.773997 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rxm6p\" (UniqueName: \"kubernetes.io/projected/7600bb43-28bc-4d04-9fb5-6dcef8b0841f-kube-api-access-rxm6p\") pod \"7600bb43-28bc-4d04-9fb5-6dcef8b0841f\" (UID: \"7600bb43-28bc-4d04-9fb5-6dcef8b0841f\") " Oct 02 11:13:26 crc kubenswrapper[4783]: I1002 11:13:26.774173 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lq8cb\" (UniqueName: \"kubernetes.io/projected/1d0e3dc9-58c4-489e-aef0-462f7ba245ca-kube-api-access-lq8cb\") pod \"1d0e3dc9-58c4-489e-aef0-462f7ba245ca\" (UID: \"1d0e3dc9-58c4-489e-aef0-462f7ba245ca\") " Oct 02 11:13:26 crc kubenswrapper[4783]: I1002 11:13:26.775079 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-stl2n\" (UniqueName: \"kubernetes.io/projected/f366b995-e9d1-4ddf-98a9-3278480dca51-kube-api-access-stl2n\") pod \"f366b995-e9d1-4ddf-98a9-3278480dca51\" (UID: \"f366b995-e9d1-4ddf-98a9-3278480dca51\") " Oct 02 11:13:26 crc kubenswrapper[4783]: I1002 11:13:26.780573 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7600bb43-28bc-4d04-9fb5-6dcef8b0841f-kube-api-access-rxm6p" (OuterVolumeSpecName: "kube-api-access-rxm6p") pod "7600bb43-28bc-4d04-9fb5-6dcef8b0841f" (UID: "7600bb43-28bc-4d04-9fb5-6dcef8b0841f"). InnerVolumeSpecName "kube-api-access-rxm6p". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:13:26 crc kubenswrapper[4783]: I1002 11:13:26.784781 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d0e3dc9-58c4-489e-aef0-462f7ba245ca-kube-api-access-lq8cb" (OuterVolumeSpecName: "kube-api-access-lq8cb") pod "1d0e3dc9-58c4-489e-aef0-462f7ba245ca" (UID: "1d0e3dc9-58c4-489e-aef0-462f7ba245ca"). InnerVolumeSpecName "kube-api-access-lq8cb". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:13:26 crc kubenswrapper[4783]: I1002 11:13:26.792566 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f366b995-e9d1-4ddf-98a9-3278480dca51-kube-api-access-stl2n" (OuterVolumeSpecName: "kube-api-access-stl2n") pod "f366b995-e9d1-4ddf-98a9-3278480dca51" (UID: "f366b995-e9d1-4ddf-98a9-3278480dca51"). InnerVolumeSpecName "kube-api-access-stl2n". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:13:26 crc kubenswrapper[4783]: I1002 11:13:26.877353 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rxm6p\" (UniqueName: \"kubernetes.io/projected/7600bb43-28bc-4d04-9fb5-6dcef8b0841f-kube-api-access-rxm6p\") on node \"crc\" DevicePath \"\"" Oct 02 11:13:26 crc kubenswrapper[4783]: I1002 11:13:26.877389 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lq8cb\" (UniqueName: \"kubernetes.io/projected/1d0e3dc9-58c4-489e-aef0-462f7ba245ca-kube-api-access-lq8cb\") on node \"crc\" DevicePath \"\"" Oct 02 11:13:26 crc kubenswrapper[4783]: I1002 11:13:26.877401 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-stl2n\" (UniqueName: \"kubernetes.io/projected/f366b995-e9d1-4ddf-98a9-3278480dca51-kube-api-access-stl2n\") on node \"crc\" DevicePath \"\"" Oct 02 11:13:27 crc kubenswrapper[4783]: I1002 11:13:27.292166 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-3b79-account-create-pkl5b" event={"ID":"1d0e3dc9-58c4-489e-aef0-462f7ba245ca","Type":"ContainerDied","Data":"f39f9ce1a43ab942f8b8e683aec940007dbe4c3c8af05b5b4cc7893576c0f498"} Oct 02 11:13:27 crc kubenswrapper[4783]: I1002 11:13:27.292210 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f39f9ce1a43ab942f8b8e683aec940007dbe4c3c8af05b5b4cc7893576c0f498" Oct 02 11:13:27 crc kubenswrapper[4783]: I1002 11:13:27.292186 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-3b79-account-create-pkl5b" Oct 02 11:13:27 crc kubenswrapper[4783]: I1002 11:13:27.293841 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-a382-account-create-2ccl8" Oct 02 11:13:27 crc kubenswrapper[4783]: I1002 11:13:27.293848 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-a382-account-create-2ccl8" event={"ID":"7600bb43-28bc-4d04-9fb5-6dcef8b0841f","Type":"ContainerDied","Data":"6c3a5dd2373ab92b3d3254b1ec32c41c9d6fb4173a77c30032858852d53671a2"} Oct 02 11:13:27 crc kubenswrapper[4783]: I1002 11:13:27.293882 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6c3a5dd2373ab92b3d3254b1ec32c41c9d6fb4173a77c30032858852d53671a2" Oct 02 11:13:27 crc kubenswrapper[4783]: I1002 11:13:27.296270 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-fea5-account-create-hh5rg" event={"ID":"f366b995-e9d1-4ddf-98a9-3278480dca51","Type":"ContainerDied","Data":"4b90c4c9e591e3a4695f020060e7b92b3b4b2585a9f6c64753ad2b7c26ad650c"} Oct 02 11:13:27 crc kubenswrapper[4783]: I1002 11:13:27.296296 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4b90c4c9e591e3a4695f020060e7b92b3b4b2585a9f6c64753ad2b7c26ad650c" Oct 02 11:13:27 crc kubenswrapper[4783]: I1002 11:13:27.296346 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-fea5-account-create-hh5rg" Oct 02 11:13:31 crc kubenswrapper[4783]: I1002 11:13:31.234053 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-qmrrk"] Oct 02 11:13:31 crc kubenswrapper[4783]: E1002 11:13:31.234937 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="965b24b4-4f8c-464b-a1ef-a51077d6c553" containerName="dnsmasq-dns" Oct 02 11:13:31 crc kubenswrapper[4783]: I1002 11:13:31.234953 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="965b24b4-4f8c-464b-a1ef-a51077d6c553" containerName="dnsmasq-dns" Oct 02 11:13:31 crc kubenswrapper[4783]: E1002 11:13:31.234961 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df261fe1-316c-4f6c-828f-b0668ed6c1ee" containerName="mariadb-account-create" Oct 02 11:13:31 crc kubenswrapper[4783]: I1002 11:13:31.234967 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="df261fe1-316c-4f6c-828f-b0668ed6c1ee" containerName="mariadb-account-create" Oct 02 11:13:31 crc kubenswrapper[4783]: E1002 11:13:31.234977 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7600bb43-28bc-4d04-9fb5-6dcef8b0841f" containerName="mariadb-account-create" Oct 02 11:13:31 crc kubenswrapper[4783]: I1002 11:13:31.234983 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="7600bb43-28bc-4d04-9fb5-6dcef8b0841f" containerName="mariadb-account-create" Oct 02 11:13:31 crc kubenswrapper[4783]: E1002 11:13:31.234998 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1d0e3dc9-58c4-489e-aef0-462f7ba245ca" containerName="mariadb-account-create" Oct 02 11:13:31 crc kubenswrapper[4783]: I1002 11:13:31.235004 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="1d0e3dc9-58c4-489e-aef0-462f7ba245ca" containerName="mariadb-account-create" Oct 02 11:13:31 crc kubenswrapper[4783]: E1002 11:13:31.235017 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="965b24b4-4f8c-464b-a1ef-a51077d6c553" containerName="init" Oct 02 11:13:31 crc kubenswrapper[4783]: I1002 11:13:31.235023 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="965b24b4-4f8c-464b-a1ef-a51077d6c553" containerName="init" Oct 02 11:13:31 crc kubenswrapper[4783]: E1002 11:13:31.235035 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f366b995-e9d1-4ddf-98a9-3278480dca51" containerName="mariadb-account-create" Oct 02 11:13:31 crc kubenswrapper[4783]: I1002 11:13:31.235040 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="f366b995-e9d1-4ddf-98a9-3278480dca51" containerName="mariadb-account-create" Oct 02 11:13:31 crc kubenswrapper[4783]: E1002 11:13:31.235052 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4490b7d0-ae49-4417-b778-2050301901bc" containerName="mariadb-account-create" Oct 02 11:13:31 crc kubenswrapper[4783]: I1002 11:13:31.235059 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="4490b7d0-ae49-4417-b778-2050301901bc" containerName="mariadb-account-create" Oct 02 11:13:31 crc kubenswrapper[4783]: I1002 11:13:31.235204 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="4490b7d0-ae49-4417-b778-2050301901bc" containerName="mariadb-account-create" Oct 02 11:13:31 crc kubenswrapper[4783]: I1002 11:13:31.235224 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="f366b995-e9d1-4ddf-98a9-3278480dca51" containerName="mariadb-account-create" Oct 02 11:13:31 crc kubenswrapper[4783]: I1002 11:13:31.235235 4783 
memory_manager.go:354] "RemoveStaleState removing state" podUID="1d0e3dc9-58c4-489e-aef0-462f7ba245ca" containerName="mariadb-account-create" Oct 02 11:13:31 crc kubenswrapper[4783]: I1002 11:13:31.235245 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="7600bb43-28bc-4d04-9fb5-6dcef8b0841f" containerName="mariadb-account-create" Oct 02 11:13:31 crc kubenswrapper[4783]: I1002 11:13:31.235255 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="df261fe1-316c-4f6c-828f-b0668ed6c1ee" containerName="mariadb-account-create" Oct 02 11:13:31 crc kubenswrapper[4783]: I1002 11:13:31.235263 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="965b24b4-4f8c-464b-a1ef-a51077d6c553" containerName="dnsmasq-dns" Oct 02 11:13:31 crc kubenswrapper[4783]: I1002 11:13:31.235771 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-qmrrk" Oct 02 11:13:31 crc kubenswrapper[4783]: I1002 11:13:31.238163 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Oct 02 11:13:31 crc kubenswrapper[4783]: I1002 11:13:31.238806 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-wgqxk" Oct 02 11:13:31 crc kubenswrapper[4783]: I1002 11:13:31.252291 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Oct 02 11:13:31 crc kubenswrapper[4783]: I1002 11:13:31.252519 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Oct 02 11:13:31 crc kubenswrapper[4783]: I1002 11:13:31.261027 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-qmrrk"] Oct 02 11:13:31 crc kubenswrapper[4783]: I1002 11:13:31.353909 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0c6de12-9394-4f1e-b50b-c5b54e840d5d-config-data\") pod \"keystone-db-sync-qmrrk\" (UID: \"c0c6de12-9394-4f1e-b50b-c5b54e840d5d\") " pod="openstack/keystone-db-sync-qmrrk" Oct 02 11:13:31 crc kubenswrapper[4783]: I1002 11:13:31.354087 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vlbwl\" (UniqueName: \"kubernetes.io/projected/c0c6de12-9394-4f1e-b50b-c5b54e840d5d-kube-api-access-vlbwl\") pod \"keystone-db-sync-qmrrk\" (UID: \"c0c6de12-9394-4f1e-b50b-c5b54e840d5d\") " pod="openstack/keystone-db-sync-qmrrk" Oct 02 11:13:31 crc kubenswrapper[4783]: I1002 11:13:31.354131 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0c6de12-9394-4f1e-b50b-c5b54e840d5d-combined-ca-bundle\") pod \"keystone-db-sync-qmrrk\" (UID: \"c0c6de12-9394-4f1e-b50b-c5b54e840d5d\") " pod="openstack/keystone-db-sync-qmrrk" Oct 02 11:13:31 crc kubenswrapper[4783]: I1002 11:13:31.455532 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vlbwl\" (UniqueName: \"kubernetes.io/projected/c0c6de12-9394-4f1e-b50b-c5b54e840d5d-kube-api-access-vlbwl\") pod \"keystone-db-sync-qmrrk\" (UID: \"c0c6de12-9394-4f1e-b50b-c5b54e840d5d\") " pod="openstack/keystone-db-sync-qmrrk" Oct 02 11:13:31 crc kubenswrapper[4783]: I1002 11:13:31.455602 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/c0c6de12-9394-4f1e-b50b-c5b54e840d5d-combined-ca-bundle\") pod \"keystone-db-sync-qmrrk\" (UID: \"c0c6de12-9394-4f1e-b50b-c5b54e840d5d\") " pod="openstack/keystone-db-sync-qmrrk" Oct 02 11:13:31 crc kubenswrapper[4783]: I1002 11:13:31.455659 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0c6de12-9394-4f1e-b50b-c5b54e840d5d-config-data\") pod \"keystone-db-sync-qmrrk\" (UID: \"c0c6de12-9394-4f1e-b50b-c5b54e840d5d\") " pod="openstack/keystone-db-sync-qmrrk" Oct 02 11:13:31 crc kubenswrapper[4783]: I1002 11:13:31.462755 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0c6de12-9394-4f1e-b50b-c5b54e840d5d-config-data\") pod \"keystone-db-sync-qmrrk\" (UID: \"c0c6de12-9394-4f1e-b50b-c5b54e840d5d\") " pod="openstack/keystone-db-sync-qmrrk" Oct 02 11:13:31 crc kubenswrapper[4783]: I1002 11:13:31.463432 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0c6de12-9394-4f1e-b50b-c5b54e840d5d-combined-ca-bundle\") pod \"keystone-db-sync-qmrrk\" (UID: \"c0c6de12-9394-4f1e-b50b-c5b54e840d5d\") " pod="openstack/keystone-db-sync-qmrrk" Oct 02 11:13:31 crc kubenswrapper[4783]: I1002 11:13:31.471899 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vlbwl\" (UniqueName: \"kubernetes.io/projected/c0c6de12-9394-4f1e-b50b-c5b54e840d5d-kube-api-access-vlbwl\") pod \"keystone-db-sync-qmrrk\" (UID: \"c0c6de12-9394-4f1e-b50b-c5b54e840d5d\") " pod="openstack/keystone-db-sync-qmrrk" Oct 02 11:13:31 crc kubenswrapper[4783]: I1002 11:13:31.556254 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-qmrrk" Oct 02 11:13:47 crc kubenswrapper[4783]: E1002 11:13:47.169254 4783 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-glance-api:current-podified" Oct 02 11:13:47 crc kubenswrapper[4783]: E1002 11:13:47.170038 4783 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:glance-db-sync,Image:quay.io/podified-antelope-centos9/openstack-glance-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/glance/glance.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qbhlz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42415,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42415,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-db-sync-jq2kf_openstack(aea4b3d6-8814-424e-a0b0-2748b63f0bfd): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Oct 02 11:13:47 crc kubenswrapper[4783]: E1002 11:13:47.171288 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/glance-db-sync-jq2kf" podUID="aea4b3d6-8814-424e-a0b0-2748b63f0bfd" Oct 02 11:13:47 crc kubenswrapper[4783]: I1002 11:13:47.313475 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-qmrrk"] Oct 02 11:13:47 crc kubenswrapper[4783]: I1002 11:13:47.477565 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-qmrrk" event={"ID":"c0c6de12-9394-4f1e-b50b-c5b54e840d5d","Type":"ContainerStarted","Data":"0b68f635a9353541e223a50861a548cfdee496854354ee5d045169f896373241"} Oct 02 11:13:47 crc kubenswrapper[4783]: E1002 11:13:47.479117 4783 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-glance-api:current-podified\\\"\"" pod="openstack/glance-db-sync-jq2kf" podUID="aea4b3d6-8814-424e-a0b0-2748b63f0bfd" Oct 02 11:13:57 crc kubenswrapper[4783]: I1002 11:13:57.576007 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-qmrrk" event={"ID":"c0c6de12-9394-4f1e-b50b-c5b54e840d5d","Type":"ContainerStarted","Data":"3ff6335580e338590b57c72739a560c6ae28c99def50ffd05d6f6e11755467e6"} Oct 02 11:13:57 crc kubenswrapper[4783]: I1002 11:13:57.596958 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-qmrrk" podStartSLOduration=16.592836531 podStartE2EDuration="26.59693265s" podCreationTimestamp="2025-10-02 11:13:31 +0000 UTC" firstStartedPulling="2025-10-02 11:13:47.325150264 +0000 UTC m=+1260.641344545" lastFinishedPulling="2025-10-02 11:13:57.329246403 +0000 UTC m=+1270.645440664" observedRunningTime="2025-10-02 11:13:57.592015816 +0000 UTC m=+1270.908210097" watchObservedRunningTime="2025-10-02 11:13:57.59693265 +0000 UTC m=+1270.913126931" Oct 02 11:14:00 crc kubenswrapper[4783]: I1002 11:14:00.602378 4783 generic.go:334] "Generic (PLEG): container finished" podID="c0c6de12-9394-4f1e-b50b-c5b54e840d5d" containerID="3ff6335580e338590b57c72739a560c6ae28c99def50ffd05d6f6e11755467e6" exitCode=0 Oct 02 11:14:00 crc kubenswrapper[4783]: I1002 11:14:00.602464 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-qmrrk" event={"ID":"c0c6de12-9394-4f1e-b50b-c5b54e840d5d","Type":"ContainerDied","Data":"3ff6335580e338590b57c72739a560c6ae28c99def50ffd05d6f6e11755467e6"} Oct 02 11:14:01 crc kubenswrapper[4783]: I1002 11:14:01.909296 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-qmrrk" Oct 02 11:14:02 crc kubenswrapper[4783]: I1002 11:14:02.020984 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vlbwl\" (UniqueName: \"kubernetes.io/projected/c0c6de12-9394-4f1e-b50b-c5b54e840d5d-kube-api-access-vlbwl\") pod \"c0c6de12-9394-4f1e-b50b-c5b54e840d5d\" (UID: \"c0c6de12-9394-4f1e-b50b-c5b54e840d5d\") " Oct 02 11:14:02 crc kubenswrapper[4783]: I1002 11:14:02.021226 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0c6de12-9394-4f1e-b50b-c5b54e840d5d-config-data\") pod \"c0c6de12-9394-4f1e-b50b-c5b54e840d5d\" (UID: \"c0c6de12-9394-4f1e-b50b-c5b54e840d5d\") " Oct 02 11:14:02 crc kubenswrapper[4783]: I1002 11:14:02.021335 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0c6de12-9394-4f1e-b50b-c5b54e840d5d-combined-ca-bundle\") pod \"c0c6de12-9394-4f1e-b50b-c5b54e840d5d\" (UID: \"c0c6de12-9394-4f1e-b50b-c5b54e840d5d\") " Oct 02 11:14:02 crc kubenswrapper[4783]: I1002 11:14:02.028322 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c0c6de12-9394-4f1e-b50b-c5b54e840d5d-kube-api-access-vlbwl" (OuterVolumeSpecName: "kube-api-access-vlbwl") pod "c0c6de12-9394-4f1e-b50b-c5b54e840d5d" (UID: "c0c6de12-9394-4f1e-b50b-c5b54e840d5d"). InnerVolumeSpecName "kube-api-access-vlbwl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:14:02 crc kubenswrapper[4783]: I1002 11:14:02.109595 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c0c6de12-9394-4f1e-b50b-c5b54e840d5d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c0c6de12-9394-4f1e-b50b-c5b54e840d5d" (UID: "c0c6de12-9394-4f1e-b50b-c5b54e840d5d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:14:02 crc kubenswrapper[4783]: I1002 11:14:02.120713 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c0c6de12-9394-4f1e-b50b-c5b54e840d5d-config-data" (OuterVolumeSpecName: "config-data") pod "c0c6de12-9394-4f1e-b50b-c5b54e840d5d" (UID: "c0c6de12-9394-4f1e-b50b-c5b54e840d5d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:14:02 crc kubenswrapper[4783]: I1002 11:14:02.124802 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vlbwl\" (UniqueName: \"kubernetes.io/projected/c0c6de12-9394-4f1e-b50b-c5b54e840d5d-kube-api-access-vlbwl\") on node \"crc\" DevicePath \"\"" Oct 02 11:14:02 crc kubenswrapper[4783]: I1002 11:14:02.124838 4783 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c0c6de12-9394-4f1e-b50b-c5b54e840d5d-config-data\") on node \"crc\" DevicePath \"\"" Oct 02 11:14:02 crc kubenswrapper[4783]: I1002 11:14:02.124851 4783 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c0c6de12-9394-4f1e-b50b-c5b54e840d5d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 11:14:02 crc kubenswrapper[4783]: I1002 11:14:02.622448 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-qmrrk" event={"ID":"c0c6de12-9394-4f1e-b50b-c5b54e840d5d","Type":"ContainerDied","Data":"0b68f635a9353541e223a50861a548cfdee496854354ee5d045169f896373241"} Oct 02 11:14:02 crc kubenswrapper[4783]: I1002 11:14:02.622486 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0b68f635a9353541e223a50861a548cfdee496854354ee5d045169f896373241" Oct 02 11:14:02 crc kubenswrapper[4783]: I1002 11:14:02.622703 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-qmrrk" Oct 02 11:14:02 crc kubenswrapper[4783]: I1002 11:14:02.929327 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-h682n"] Oct 02 11:14:02 crc kubenswrapper[4783]: E1002 11:14:02.930089 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c0c6de12-9394-4f1e-b50b-c5b54e840d5d" containerName="keystone-db-sync" Oct 02 11:14:02 crc kubenswrapper[4783]: I1002 11:14:02.930109 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="c0c6de12-9394-4f1e-b50b-c5b54e840d5d" containerName="keystone-db-sync" Oct 02 11:14:02 crc kubenswrapper[4783]: I1002 11:14:02.930295 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="c0c6de12-9394-4f1e-b50b-c5b54e840d5d" containerName="keystone-db-sync" Oct 02 11:14:02 crc kubenswrapper[4783]: I1002 11:14:02.930945 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-h682n" Oct 02 11:14:02 crc kubenswrapper[4783]: I1002 11:14:02.932794 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Oct 02 11:14:02 crc kubenswrapper[4783]: I1002 11:14:02.935692 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Oct 02 11:14:02 crc kubenswrapper[4783]: I1002 11:14:02.940284 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Oct 02 11:14:02 crc kubenswrapper[4783]: I1002 11:14:02.942754 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-55fff446b9-mv8nq"] Oct 02 11:14:02 crc kubenswrapper[4783]: I1002 11:14:02.947377 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55fff446b9-mv8nq" Oct 02 11:14:02 crc kubenswrapper[4783]: I1002 11:14:02.950819 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-wgqxk" Oct 02 11:14:02 crc kubenswrapper[4783]: I1002 11:14:02.963695 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-h682n"] Oct 02 11:14:02 crc kubenswrapper[4783]: I1002 11:14:02.993332 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-55fff446b9-mv8nq"] Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.048443 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f496b\" (UniqueName: \"kubernetes.io/projected/1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf-kube-api-access-f496b\") pod \"dnsmasq-dns-55fff446b9-mv8nq\" (UID: \"1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf\") " pod="openstack/dnsmasq-dns-55fff446b9-mv8nq" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.048544 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf-dns-svc\") pod \"dnsmasq-dns-55fff446b9-mv8nq\" (UID: \"1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf\") " pod="openstack/dnsmasq-dns-55fff446b9-mv8nq" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.048585 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rjjzn\" (UniqueName: \"kubernetes.io/projected/c91175c5-8c5b-4e52-aeb1-6a6f181803a7-kube-api-access-rjjzn\") pod \"keystone-bootstrap-h682n\" (UID: \"c91175c5-8c5b-4e52-aeb1-6a6f181803a7\") " pod="openstack/keystone-bootstrap-h682n" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.048665 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c91175c5-8c5b-4e52-aeb1-6a6f181803a7-config-data\") pod \"keystone-bootstrap-h682n\" (UID: \"c91175c5-8c5b-4e52-aeb1-6a6f181803a7\") " pod="openstack/keystone-bootstrap-h682n" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.048698 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf-ovsdbserver-nb\") pod \"dnsmasq-dns-55fff446b9-mv8nq\" (UID: \"1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf\") " pod="openstack/dnsmasq-dns-55fff446b9-mv8nq" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.048748 4783 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/c91175c5-8c5b-4e52-aeb1-6a6f181803a7-credential-keys\") pod \"keystone-bootstrap-h682n\" (UID: \"c91175c5-8c5b-4e52-aeb1-6a6f181803a7\") " pod="openstack/keystone-bootstrap-h682n" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.048804 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf-ovsdbserver-sb\") pod \"dnsmasq-dns-55fff446b9-mv8nq\" (UID: \"1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf\") " pod="openstack/dnsmasq-dns-55fff446b9-mv8nq" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.048833 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf-dns-swift-storage-0\") pod \"dnsmasq-dns-55fff446b9-mv8nq\" (UID: \"1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf\") " pod="openstack/dnsmasq-dns-55fff446b9-mv8nq" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.048856 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c91175c5-8c5b-4e52-aeb1-6a6f181803a7-fernet-keys\") pod \"keystone-bootstrap-h682n\" (UID: \"c91175c5-8c5b-4e52-aeb1-6a6f181803a7\") " pod="openstack/keystone-bootstrap-h682n" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.048890 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf-config\") pod \"dnsmasq-dns-55fff446b9-mv8nq\" (UID: \"1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf\") " pod="openstack/dnsmasq-dns-55fff446b9-mv8nq" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.048918 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c91175c5-8c5b-4e52-aeb1-6a6f181803a7-scripts\") pod \"keystone-bootstrap-h682n\" (UID: \"c91175c5-8c5b-4e52-aeb1-6a6f181803a7\") " pod="openstack/keystone-bootstrap-h682n" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.052527 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c91175c5-8c5b-4e52-aeb1-6a6f181803a7-combined-ca-bundle\") pod \"keystone-bootstrap-h682n\" (UID: \"c91175c5-8c5b-4e52-aeb1-6a6f181803a7\") " pod="openstack/keystone-bootstrap-h682n" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.154761 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c91175c5-8c5b-4e52-aeb1-6a6f181803a7-scripts\") pod \"keystone-bootstrap-h682n\" (UID: \"c91175c5-8c5b-4e52-aeb1-6a6f181803a7\") " pod="openstack/keystone-bootstrap-h682n" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.154820 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c91175c5-8c5b-4e52-aeb1-6a6f181803a7-combined-ca-bundle\") pod \"keystone-bootstrap-h682n\" (UID: \"c91175c5-8c5b-4e52-aeb1-6a6f181803a7\") " pod="openstack/keystone-bootstrap-h682n" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.154863 4783 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-f496b\" (UniqueName: \"kubernetes.io/projected/1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf-kube-api-access-f496b\") pod \"dnsmasq-dns-55fff446b9-mv8nq\" (UID: \"1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf\") " pod="openstack/dnsmasq-dns-55fff446b9-mv8nq" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.154919 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf-dns-svc\") pod \"dnsmasq-dns-55fff446b9-mv8nq\" (UID: \"1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf\") " pod="openstack/dnsmasq-dns-55fff446b9-mv8nq" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.154950 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rjjzn\" (UniqueName: \"kubernetes.io/projected/c91175c5-8c5b-4e52-aeb1-6a6f181803a7-kube-api-access-rjjzn\") pod \"keystone-bootstrap-h682n\" (UID: \"c91175c5-8c5b-4e52-aeb1-6a6f181803a7\") " pod="openstack/keystone-bootstrap-h682n" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.155009 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c91175c5-8c5b-4e52-aeb1-6a6f181803a7-config-data\") pod \"keystone-bootstrap-h682n\" (UID: \"c91175c5-8c5b-4e52-aeb1-6a6f181803a7\") " pod="openstack/keystone-bootstrap-h682n" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.155039 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf-ovsdbserver-nb\") pod \"dnsmasq-dns-55fff446b9-mv8nq\" (UID: \"1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf\") " pod="openstack/dnsmasq-dns-55fff446b9-mv8nq" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.155078 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/c91175c5-8c5b-4e52-aeb1-6a6f181803a7-credential-keys\") pod \"keystone-bootstrap-h682n\" (UID: \"c91175c5-8c5b-4e52-aeb1-6a6f181803a7\") " pod="openstack/keystone-bootstrap-h682n" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.155120 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf-ovsdbserver-sb\") pod \"dnsmasq-dns-55fff446b9-mv8nq\" (UID: \"1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf\") " pod="openstack/dnsmasq-dns-55fff446b9-mv8nq" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.155150 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf-dns-swift-storage-0\") pod \"dnsmasq-dns-55fff446b9-mv8nq\" (UID: \"1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf\") " pod="openstack/dnsmasq-dns-55fff446b9-mv8nq" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.155173 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c91175c5-8c5b-4e52-aeb1-6a6f181803a7-fernet-keys\") pod \"keystone-bootstrap-h682n\" (UID: \"c91175c5-8c5b-4e52-aeb1-6a6f181803a7\") " pod="openstack/keystone-bootstrap-h682n" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.155198 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf-config\") pod \"dnsmasq-dns-55fff446b9-mv8nq\" (UID: \"1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf\") " pod="openstack/dnsmasq-dns-55fff446b9-mv8nq" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.156162 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf-config\") pod \"dnsmasq-dns-55fff446b9-mv8nq\" (UID: \"1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf\") " pod="openstack/dnsmasq-dns-55fff446b9-mv8nq" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.156843 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf-ovsdbserver-nb\") pod \"dnsmasq-dns-55fff446b9-mv8nq\" (UID: \"1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf\") " pod="openstack/dnsmasq-dns-55fff446b9-mv8nq" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.157596 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf-dns-svc\") pod \"dnsmasq-dns-55fff446b9-mv8nq\" (UID: \"1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf\") " pod="openstack/dnsmasq-dns-55fff446b9-mv8nq" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.158263 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf-dns-swift-storage-0\") pod \"dnsmasq-dns-55fff446b9-mv8nq\" (UID: \"1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf\") " pod="openstack/dnsmasq-dns-55fff446b9-mv8nq" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.158701 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf-ovsdbserver-sb\") pod \"dnsmasq-dns-55fff446b9-mv8nq\" (UID: \"1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf\") " pod="openstack/dnsmasq-dns-55fff446b9-mv8nq" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.165129 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c91175c5-8c5b-4e52-aeb1-6a6f181803a7-combined-ca-bundle\") pod \"keystone-bootstrap-h682n\" (UID: \"c91175c5-8c5b-4e52-aeb1-6a6f181803a7\") " pod="openstack/keystone-bootstrap-h682n" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.165615 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c91175c5-8c5b-4e52-aeb1-6a6f181803a7-scripts\") pod \"keystone-bootstrap-h682n\" (UID: \"c91175c5-8c5b-4e52-aeb1-6a6f181803a7\") " pod="openstack/keystone-bootstrap-h682n" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.170254 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c91175c5-8c5b-4e52-aeb1-6a6f181803a7-config-data\") pod \"keystone-bootstrap-h682n\" (UID: \"c91175c5-8c5b-4e52-aeb1-6a6f181803a7\") " pod="openstack/keystone-bootstrap-h682n" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.180003 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c91175c5-8c5b-4e52-aeb1-6a6f181803a7-fernet-keys\") pod \"keystone-bootstrap-h682n\" (UID: \"c91175c5-8c5b-4e52-aeb1-6a6f181803a7\") " pod="openstack/keystone-bootstrap-h682n" 
Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.182810 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/c91175c5-8c5b-4e52-aeb1-6a6f181803a7-credential-keys\") pod \"keystone-bootstrap-h682n\" (UID: \"c91175c5-8c5b-4e52-aeb1-6a6f181803a7\") " pod="openstack/keystone-bootstrap-h682n" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.249801 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rjjzn\" (UniqueName: \"kubernetes.io/projected/c91175c5-8c5b-4e52-aeb1-6a6f181803a7-kube-api-access-rjjzn\") pod \"keystone-bootstrap-h682n\" (UID: \"c91175c5-8c5b-4e52-aeb1-6a6f181803a7\") " pod="openstack/keystone-bootstrap-h682n" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.286936 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-h682n" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.295592 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f496b\" (UniqueName: \"kubernetes.io/projected/1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf-kube-api-access-f496b\") pod \"dnsmasq-dns-55fff446b9-mv8nq\" (UID: \"1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf\") " pod="openstack/dnsmasq-dns-55fff446b9-mv8nq" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.311047 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55fff446b9-mv8nq" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.315503 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-sqsnf"] Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.317388 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-sqsnf" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.324113 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.336994 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-bkgxf" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.337114 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.396525 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-sqsnf"] Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.438388 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-547b75bc85-glzvn"] Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.440345 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-547b75bc85-glzvn" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.469066 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon-horizon-dockercfg-cznp9" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.469469 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-scripts" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.469613 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"horizon" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.469728 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"horizon-config-data" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.476514 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-547b75bc85-glzvn"] Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.497829 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-bh7g4"] Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.498956 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-bh7g4" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.507274 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.507571 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.507750 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-7rp2v" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.509050 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9e4fb56d-2565-4383-a883-a0c1eae40cb4-scripts\") pod \"cinder-db-sync-sqsnf\" (UID: \"9e4fb56d-2565-4383-a883-a0c1eae40cb4\") " pod="openstack/cinder-db-sync-sqsnf" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.509309 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9e4fb56d-2565-4383-a883-a0c1eae40cb4-etc-machine-id\") pod \"cinder-db-sync-sqsnf\" (UID: \"9e4fb56d-2565-4383-a883-a0c1eae40cb4\") " pod="openstack/cinder-db-sync-sqsnf" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.509484 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9e4fb56d-2565-4383-a883-a0c1eae40cb4-config-data\") pod \"cinder-db-sync-sqsnf\" (UID: \"9e4fb56d-2565-4383-a883-a0c1eae40cb4\") " pod="openstack/cinder-db-sync-sqsnf" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.509597 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jscxl\" (UniqueName: \"kubernetes.io/projected/9e4fb56d-2565-4383-a883-a0c1eae40cb4-kube-api-access-jscxl\") pod \"cinder-db-sync-sqsnf\" (UID: \"9e4fb56d-2565-4383-a883-a0c1eae40cb4\") " pod="openstack/cinder-db-sync-sqsnf" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.509758 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e4fb56d-2565-4383-a883-a0c1eae40cb4-combined-ca-bundle\") 
pod \"cinder-db-sync-sqsnf\" (UID: \"9e4fb56d-2565-4383-a883-a0c1eae40cb4\") " pod="openstack/cinder-db-sync-sqsnf" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.509936 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/9e4fb56d-2565-4383-a883-a0c1eae40cb4-db-sync-config-data\") pod \"cinder-db-sync-sqsnf\" (UID: \"9e4fb56d-2565-4383-a883-a0c1eae40cb4\") " pod="openstack/cinder-db-sync-sqsnf" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.537927 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-bh7g4"] Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.572102 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-g9szh"] Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.573405 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-g9szh" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.584581 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.585299 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-g9szh"] Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.586333 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-kzrxl" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.612334 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/93f15260-265f-4b6f-96c9-f1beb4556c59-horizon-secret-key\") pod \"horizon-547b75bc85-glzvn\" (UID: \"93f15260-265f-4b6f-96c9-f1beb4556c59\") " pod="openstack/horizon-547b75bc85-glzvn" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.612439 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9e4fb56d-2565-4383-a883-a0c1eae40cb4-etc-machine-id\") pod \"cinder-db-sync-sqsnf\" (UID: \"9e4fb56d-2565-4383-a883-a0c1eae40cb4\") " pod="openstack/cinder-db-sync-sqsnf" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.612464 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/479a79a2-f65b-443b-865a-bec4c138b978-combined-ca-bundle\") pod \"neutron-db-sync-bh7g4\" (UID: \"479a79a2-f65b-443b-865a-bec4c138b978\") " pod="openstack/neutron-db-sync-bh7g4" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.612506 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9e4fb56d-2565-4383-a883-a0c1eae40cb4-config-data\") pod \"cinder-db-sync-sqsnf\" (UID: \"9e4fb56d-2565-4383-a883-a0c1eae40cb4\") " pod="openstack/cinder-db-sync-sqsnf" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.612536 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jscxl\" (UniqueName: \"kubernetes.io/projected/9e4fb56d-2565-4383-a883-a0c1eae40cb4-kube-api-access-jscxl\") pod \"cinder-db-sync-sqsnf\" (UID: \"9e4fb56d-2565-4383-a883-a0c1eae40cb4\") " pod="openstack/cinder-db-sync-sqsnf" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.612592 4783 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e4fb56d-2565-4383-a883-a0c1eae40cb4-combined-ca-bundle\") pod \"cinder-db-sync-sqsnf\" (UID: \"9e4fb56d-2565-4383-a883-a0c1eae40cb4\") " pod="openstack/cinder-db-sync-sqsnf" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.612619 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-plkf2\" (UniqueName: \"kubernetes.io/projected/479a79a2-f65b-443b-865a-bec4c138b978-kube-api-access-plkf2\") pod \"neutron-db-sync-bh7g4\" (UID: \"479a79a2-f65b-443b-865a-bec4c138b978\") " pod="openstack/neutron-db-sync-bh7g4" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.612652 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/93f15260-265f-4b6f-96c9-f1beb4556c59-logs\") pod \"horizon-547b75bc85-glzvn\" (UID: \"93f15260-265f-4b6f-96c9-f1beb4556c59\") " pod="openstack/horizon-547b75bc85-glzvn" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.612704 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/93f15260-265f-4b6f-96c9-f1beb4556c59-scripts\") pod \"horizon-547b75bc85-glzvn\" (UID: \"93f15260-265f-4b6f-96c9-f1beb4556c59\") " pod="openstack/horizon-547b75bc85-glzvn" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.612737 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/9e4fb56d-2565-4383-a883-a0c1eae40cb4-db-sync-config-data\") pod \"cinder-db-sync-sqsnf\" (UID: \"9e4fb56d-2565-4383-a883-a0c1eae40cb4\") " pod="openstack/cinder-db-sync-sqsnf" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.612760 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/93f15260-265f-4b6f-96c9-f1beb4556c59-config-data\") pod \"horizon-547b75bc85-glzvn\" (UID: \"93f15260-265f-4b6f-96c9-f1beb4556c59\") " pod="openstack/horizon-547b75bc85-glzvn" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.612782 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8q588\" (UniqueName: \"kubernetes.io/projected/93f15260-265f-4b6f-96c9-f1beb4556c59-kube-api-access-8q588\") pod \"horizon-547b75bc85-glzvn\" (UID: \"93f15260-265f-4b6f-96c9-f1beb4556c59\") " pod="openstack/horizon-547b75bc85-glzvn" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.612816 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/479a79a2-f65b-443b-865a-bec4c138b978-config\") pod \"neutron-db-sync-bh7g4\" (UID: \"479a79a2-f65b-443b-865a-bec4c138b978\") " pod="openstack/neutron-db-sync-bh7g4" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.612847 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9e4fb56d-2565-4383-a883-a0c1eae40cb4-scripts\") pod \"cinder-db-sync-sqsnf\" (UID: \"9e4fb56d-2565-4383-a883-a0c1eae40cb4\") " pod="openstack/cinder-db-sync-sqsnf" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.613882 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: 
\"kubernetes.io/host-path/9e4fb56d-2565-4383-a883-a0c1eae40cb4-etc-machine-id\") pod \"cinder-db-sync-sqsnf\" (UID: \"9e4fb56d-2565-4383-a883-a0c1eae40cb4\") " pod="openstack/cinder-db-sync-sqsnf" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.617960 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.618931 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/9e4fb56d-2565-4383-a883-a0c1eae40cb4-db-sync-config-data\") pod \"cinder-db-sync-sqsnf\" (UID: \"9e4fb56d-2565-4383-a883-a0c1eae40cb4\") " pod="openstack/cinder-db-sync-sqsnf" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.620204 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.621550 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9e4fb56d-2565-4383-a883-a0c1eae40cb4-scripts\") pod \"cinder-db-sync-sqsnf\" (UID: \"9e4fb56d-2565-4383-a883-a0c1eae40cb4\") " pod="openstack/cinder-db-sync-sqsnf" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.623207 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9e4fb56d-2565-4383-a883-a0c1eae40cb4-config-data\") pod \"cinder-db-sync-sqsnf\" (UID: \"9e4fb56d-2565-4383-a883-a0c1eae40cb4\") " pod="openstack/cinder-db-sync-sqsnf" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.626442 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.626742 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.645940 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e4fb56d-2565-4383-a883-a0c1eae40cb4-combined-ca-bundle\") pod \"cinder-db-sync-sqsnf\" (UID: \"9e4fb56d-2565-4383-a883-a0c1eae40cb4\") " pod="openstack/cinder-db-sync-sqsnf" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.661745 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jscxl\" (UniqueName: \"kubernetes.io/projected/9e4fb56d-2565-4383-a883-a0c1eae40cb4-kube-api-access-jscxl\") pod \"cinder-db-sync-sqsnf\" (UID: \"9e4fb56d-2565-4383-a883-a0c1eae40cb4\") " pod="openstack/cinder-db-sync-sqsnf" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.678183 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-jq2kf" event={"ID":"aea4b3d6-8814-424e-a0b0-2748b63f0bfd","Type":"ContainerStarted","Data":"2aaa87d75cd2f7f0c066b1784385c64cc0de6811f713c16422f6cb7a24d61add"} Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.690200 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55fff446b9-mv8nq"] Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.717427 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hnhr6\" (UniqueName: \"kubernetes.io/projected/193c55b5-888f-4738-b8f6-c075d2b396a5-kube-api-access-hnhr6\") pod \"ceilometer-0\" (UID: \"193c55b5-888f-4738-b8f6-c075d2b396a5\") " pod="openstack/ceilometer-0" Oct 02 11:14:03 crc 
kubenswrapper[4783]: I1002 11:14:03.717474 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/193c55b5-888f-4738-b8f6-c075d2b396a5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"193c55b5-888f-4738-b8f6-c075d2b396a5\") " pod="openstack/ceilometer-0" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.717500 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-plkf2\" (UniqueName: \"kubernetes.io/projected/479a79a2-f65b-443b-865a-bec4c138b978-kube-api-access-plkf2\") pod \"neutron-db-sync-bh7g4\" (UID: \"479a79a2-f65b-443b-865a-bec4c138b978\") " pod="openstack/neutron-db-sync-bh7g4" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.717519 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/93f15260-265f-4b6f-96c9-f1beb4556c59-logs\") pod \"horizon-547b75bc85-glzvn\" (UID: \"93f15260-265f-4b6f-96c9-f1beb4556c59\") " pod="openstack/horizon-547b75bc85-glzvn" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.717537 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/193c55b5-888f-4738-b8f6-c075d2b396a5-scripts\") pod \"ceilometer-0\" (UID: \"193c55b5-888f-4738-b8f6-c075d2b396a5\") " pod="openstack/ceilometer-0" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.719440 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/193c55b5-888f-4738-b8f6-c075d2b396a5-log-httpd\") pod \"ceilometer-0\" (UID: \"193c55b5-888f-4738-b8f6-c075d2b396a5\") " pod="openstack/ceilometer-0" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.719477 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/93f15260-265f-4b6f-96c9-f1beb4556c59-scripts\") pod \"horizon-547b75bc85-glzvn\" (UID: \"93f15260-265f-4b6f-96c9-f1beb4556c59\") " pod="openstack/horizon-547b75bc85-glzvn" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.719498 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/93f15260-265f-4b6f-96c9-f1beb4556c59-config-data\") pod \"horizon-547b75bc85-glzvn\" (UID: \"93f15260-265f-4b6f-96c9-f1beb4556c59\") " pod="openstack/horizon-547b75bc85-glzvn" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.719514 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8q588\" (UniqueName: \"kubernetes.io/projected/93f15260-265f-4b6f-96c9-f1beb4556c59-kube-api-access-8q588\") pod \"horizon-547b75bc85-glzvn\" (UID: \"93f15260-265f-4b6f-96c9-f1beb4556c59\") " pod="openstack/horizon-547b75bc85-glzvn" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.719532 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/193c55b5-888f-4738-b8f6-c075d2b396a5-run-httpd\") pod \"ceilometer-0\" (UID: \"193c55b5-888f-4738-b8f6-c075d2b396a5\") " pod="openstack/ceilometer-0" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.719554 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/secret/479a79a2-f65b-443b-865a-bec4c138b978-config\") pod \"neutron-db-sync-bh7g4\" (UID: \"479a79a2-f65b-443b-865a-bec4c138b978\") " pod="openstack/neutron-db-sync-bh7g4" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.719570 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qz4gg\" (UniqueName: \"kubernetes.io/projected/68c31bf9-b59a-43ed-bb74-9e6cc0bce703-kube-api-access-qz4gg\") pod \"barbican-db-sync-g9szh\" (UID: \"68c31bf9-b59a-43ed-bb74-9e6cc0bce703\") " pod="openstack/barbican-db-sync-g9szh" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.719592 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/193c55b5-888f-4738-b8f6-c075d2b396a5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"193c55b5-888f-4738-b8f6-c075d2b396a5\") " pod="openstack/ceilometer-0" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.719616 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/93f15260-265f-4b6f-96c9-f1beb4556c59-horizon-secret-key\") pod \"horizon-547b75bc85-glzvn\" (UID: \"93f15260-265f-4b6f-96c9-f1beb4556c59\") " pod="openstack/horizon-547b75bc85-glzvn" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.719650 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/68c31bf9-b59a-43ed-bb74-9e6cc0bce703-combined-ca-bundle\") pod \"barbican-db-sync-g9szh\" (UID: \"68c31bf9-b59a-43ed-bb74-9e6cc0bce703\") " pod="openstack/barbican-db-sync-g9szh" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.719676 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/479a79a2-f65b-443b-865a-bec4c138b978-combined-ca-bundle\") pod \"neutron-db-sync-bh7g4\" (UID: \"479a79a2-f65b-443b-865a-bec4c138b978\") " pod="openstack/neutron-db-sync-bh7g4" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.719695 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/193c55b5-888f-4738-b8f6-c075d2b396a5-config-data\") pod \"ceilometer-0\" (UID: \"193c55b5-888f-4738-b8f6-c075d2b396a5\") " pod="openstack/ceilometer-0" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.719733 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/68c31bf9-b59a-43ed-bb74-9e6cc0bce703-db-sync-config-data\") pod \"barbican-db-sync-g9szh\" (UID: \"68c31bf9-b59a-43ed-bb74-9e6cc0bce703\") " pod="openstack/barbican-db-sync-g9szh" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.720318 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/93f15260-265f-4b6f-96c9-f1beb4556c59-logs\") pod \"horizon-547b75bc85-glzvn\" (UID: \"93f15260-265f-4b6f-96c9-f1beb4556c59\") " pod="openstack/horizon-547b75bc85-glzvn" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.720855 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/93f15260-265f-4b6f-96c9-f1beb4556c59-scripts\") pod \"horizon-547b75bc85-glzvn\" (UID: 
\"93f15260-265f-4b6f-96c9-f1beb4556c59\") " pod="openstack/horizon-547b75bc85-glzvn" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.721730 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/93f15260-265f-4b6f-96c9-f1beb4556c59-config-data\") pod \"horizon-547b75bc85-glzvn\" (UID: \"93f15260-265f-4b6f-96c9-f1beb4556c59\") " pod="openstack/horizon-547b75bc85-glzvn" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.725460 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/93f15260-265f-4b6f-96c9-f1beb4556c59-horizon-secret-key\") pod \"horizon-547b75bc85-glzvn\" (UID: \"93f15260-265f-4b6f-96c9-f1beb4556c59\") " pod="openstack/horizon-547b75bc85-glzvn" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.729507 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.735195 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/479a79a2-f65b-443b-865a-bec4c138b978-combined-ca-bundle\") pod \"neutron-db-sync-bh7g4\" (UID: \"479a79a2-f65b-443b-865a-bec4c138b978\") " pod="openstack/neutron-db-sync-bh7g4" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.750373 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-plkf2\" (UniqueName: \"kubernetes.io/projected/479a79a2-f65b-443b-865a-bec4c138b978-kube-api-access-plkf2\") pod \"neutron-db-sync-bh7g4\" (UID: \"479a79a2-f65b-443b-865a-bec4c138b978\") " pod="openstack/neutron-db-sync-bh7g4" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.752443 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/479a79a2-f65b-443b-865a-bec4c138b978-config\") pod \"neutron-db-sync-bh7g4\" (UID: \"479a79a2-f65b-443b-865a-bec4c138b978\") " pod="openstack/neutron-db-sync-bh7g4" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.765187 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8q588\" (UniqueName: \"kubernetes.io/projected/93f15260-265f-4b6f-96c9-f1beb4556c59-kube-api-access-8q588\") pod \"horizon-547b75bc85-glzvn\" (UID: \"93f15260-265f-4b6f-96c9-f1beb4556c59\") " pod="openstack/horizon-547b75bc85-glzvn" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.771308 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-sqsnf" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.773071 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-l9zch"] Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.774393 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-l9zch" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.780173 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.786034 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-kfzjc" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.786284 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.817670 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-l9zch"] Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.822603 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/193c55b5-888f-4738-b8f6-c075d2b396a5-log-httpd\") pod \"ceilometer-0\" (UID: \"193c55b5-888f-4738-b8f6-c075d2b396a5\") " pod="openstack/ceilometer-0" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.822670 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/193c55b5-888f-4738-b8f6-c075d2b396a5-run-httpd\") pod \"ceilometer-0\" (UID: \"193c55b5-888f-4738-b8f6-c075d2b396a5\") " pod="openstack/ceilometer-0" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.822697 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qz4gg\" (UniqueName: \"kubernetes.io/projected/68c31bf9-b59a-43ed-bb74-9e6cc0bce703-kube-api-access-qz4gg\") pod \"barbican-db-sync-g9szh\" (UID: \"68c31bf9-b59a-43ed-bb74-9e6cc0bce703\") " pod="openstack/barbican-db-sync-g9szh" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.822732 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/193c55b5-888f-4738-b8f6-c075d2b396a5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"193c55b5-888f-4738-b8f6-c075d2b396a5\") " pod="openstack/ceilometer-0" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.822768 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/68c31bf9-b59a-43ed-bb74-9e6cc0bce703-combined-ca-bundle\") pod \"barbican-db-sync-g9szh\" (UID: \"68c31bf9-b59a-43ed-bb74-9e6cc0bce703\") " pod="openstack/barbican-db-sync-g9szh" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.822834 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/193c55b5-888f-4738-b8f6-c075d2b396a5-config-data\") pod \"ceilometer-0\" (UID: \"193c55b5-888f-4738-b8f6-c075d2b396a5\") " pod="openstack/ceilometer-0" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.822884 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/68c31bf9-b59a-43ed-bb74-9e6cc0bce703-db-sync-config-data\") pod \"barbican-db-sync-g9szh\" (UID: \"68c31bf9-b59a-43ed-bb74-9e6cc0bce703\") " pod="openstack/barbican-db-sync-g9szh" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.822906 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hnhr6\" (UniqueName: 
\"kubernetes.io/projected/193c55b5-888f-4738-b8f6-c075d2b396a5-kube-api-access-hnhr6\") pod \"ceilometer-0\" (UID: \"193c55b5-888f-4738-b8f6-c075d2b396a5\") " pod="openstack/ceilometer-0" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.822933 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/193c55b5-888f-4738-b8f6-c075d2b396a5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"193c55b5-888f-4738-b8f6-c075d2b396a5\") " pod="openstack/ceilometer-0" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.822956 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/193c55b5-888f-4738-b8f6-c075d2b396a5-scripts\") pod \"ceilometer-0\" (UID: \"193c55b5-888f-4738-b8f6-c075d2b396a5\") " pod="openstack/ceilometer-0" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.823556 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/193c55b5-888f-4738-b8f6-c075d2b396a5-log-httpd\") pod \"ceilometer-0\" (UID: \"193c55b5-888f-4738-b8f6-c075d2b396a5\") " pod="openstack/ceilometer-0" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.824347 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/193c55b5-888f-4738-b8f6-c075d2b396a5-run-httpd\") pod \"ceilometer-0\" (UID: \"193c55b5-888f-4738-b8f6-c075d2b396a5\") " pod="openstack/ceilometer-0" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.839552 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/68c31bf9-b59a-43ed-bb74-9e6cc0bce703-db-sync-config-data\") pod \"barbican-db-sync-g9szh\" (UID: \"68c31bf9-b59a-43ed-bb74-9e6cc0bce703\") " pod="openstack/barbican-db-sync-g9szh" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.839921 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-547b75bc85-glzvn" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.846864 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/193c55b5-888f-4738-b8f6-c075d2b396a5-scripts\") pod \"ceilometer-0\" (UID: \"193c55b5-888f-4738-b8f6-c075d2b396a5\") " pod="openstack/ceilometer-0" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.853595 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-76fcf4b695-zd229"] Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.854979 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-76fcf4b695-zd229" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.859255 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-jq2kf" podStartSLOduration=4.962787129 podStartE2EDuration="42.859240896s" podCreationTimestamp="2025-10-02 11:13:21 +0000 UTC" firstStartedPulling="2025-10-02 11:13:24.120465779 +0000 UTC m=+1237.436660050" lastFinishedPulling="2025-10-02 11:14:02.016919566 +0000 UTC m=+1275.333113817" observedRunningTime="2025-10-02 11:14:03.729478554 +0000 UTC m=+1277.045672815" watchObservedRunningTime="2025-10-02 11:14:03.859240896 +0000 UTC m=+1277.175435157" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.866985 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/193c55b5-888f-4738-b8f6-c075d2b396a5-config-data\") pod \"ceilometer-0\" (UID: \"193c55b5-888f-4738-b8f6-c075d2b396a5\") " pod="openstack/ceilometer-0" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.867889 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-5fcdf54855-qfzfs"] Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.871808 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-bh7g4" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.874657 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qz4gg\" (UniqueName: \"kubernetes.io/projected/68c31bf9-b59a-43ed-bb74-9e6cc0bce703-kube-api-access-qz4gg\") pod \"barbican-db-sync-g9szh\" (UID: \"68c31bf9-b59a-43ed-bb74-9e6cc0bce703\") " pod="openstack/barbican-db-sync-g9szh" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.884159 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-5fcdf54855-qfzfs" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.901801 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5fcdf54855-qfzfs"] Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.903027 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/193c55b5-888f-4738-b8f6-c075d2b396a5-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"193c55b5-888f-4738-b8f6-c075d2b396a5\") " pod="openstack/ceilometer-0" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.903746 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hnhr6\" (UniqueName: \"kubernetes.io/projected/193c55b5-888f-4738-b8f6-c075d2b396a5-kube-api-access-hnhr6\") pod \"ceilometer-0\" (UID: \"193c55b5-888f-4738-b8f6-c075d2b396a5\") " pod="openstack/ceilometer-0" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.915984 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/193c55b5-888f-4738-b8f6-c075d2b396a5-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"193c55b5-888f-4738-b8f6-c075d2b396a5\") " pod="openstack/ceilometer-0" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.923463 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/68c31bf9-b59a-43ed-bb74-9e6cc0bce703-combined-ca-bundle\") pod \"barbican-db-sync-g9szh\" (UID: \"68c31bf9-b59a-43ed-bb74-9e6cc0bce703\") " pod="openstack/barbican-db-sync-g9szh" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.923970 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c42c3ba-c130-4b8d-940a-9aa134629554-scripts\") pod \"placement-db-sync-l9zch\" (UID: \"6c42c3ba-c130-4b8d-940a-9aa134629554\") " pod="openstack/placement-db-sync-l9zch" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.924084 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6fn5j\" (UniqueName: \"kubernetes.io/projected/6c42c3ba-c130-4b8d-940a-9aa134629554-kube-api-access-6fn5j\") pod \"placement-db-sync-l9zch\" (UID: \"6c42c3ba-c130-4b8d-940a-9aa134629554\") " pod="openstack/placement-db-sync-l9zch" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.924159 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-st9j9\" (UniqueName: \"kubernetes.io/projected/ab6e0516-5a5b-45c3-9f57-f1181fd69bac-kube-api-access-st9j9\") pod \"dnsmasq-dns-76fcf4b695-zd229\" (UID: \"ab6e0516-5a5b-45c3-9f57-f1181fd69bac\") " pod="openstack/dnsmasq-dns-76fcf4b695-zd229" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.924239 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c42c3ba-c130-4b8d-940a-9aa134629554-combined-ca-bundle\") pod \"placement-db-sync-l9zch\" (UID: \"6c42c3ba-c130-4b8d-940a-9aa134629554\") " pod="openstack/placement-db-sync-l9zch" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.924319 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c42c3ba-c130-4b8d-940a-9aa134629554-config-data\") 
pod \"placement-db-sync-l9zch\" (UID: \"6c42c3ba-c130-4b8d-940a-9aa134629554\") " pod="openstack/placement-db-sync-l9zch" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.924475 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ab6e0516-5a5b-45c3-9f57-f1181fd69bac-dns-swift-storage-0\") pod \"dnsmasq-dns-76fcf4b695-zd229\" (UID: \"ab6e0516-5a5b-45c3-9f57-f1181fd69bac\") " pod="openstack/dnsmasq-dns-76fcf4b695-zd229" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.924552 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c42c3ba-c130-4b8d-940a-9aa134629554-logs\") pod \"placement-db-sync-l9zch\" (UID: \"6c42c3ba-c130-4b8d-940a-9aa134629554\") " pod="openstack/placement-db-sync-l9zch" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.924638 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ab6e0516-5a5b-45c3-9f57-f1181fd69bac-ovsdbserver-nb\") pod \"dnsmasq-dns-76fcf4b695-zd229\" (UID: \"ab6e0516-5a5b-45c3-9f57-f1181fd69bac\") " pod="openstack/dnsmasq-dns-76fcf4b695-zd229" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.924712 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ab6e0516-5a5b-45c3-9f57-f1181fd69bac-dns-svc\") pod \"dnsmasq-dns-76fcf4b695-zd229\" (UID: \"ab6e0516-5a5b-45c3-9f57-f1181fd69bac\") " pod="openstack/dnsmasq-dns-76fcf4b695-zd229" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.924768 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ab6e0516-5a5b-45c3-9f57-f1181fd69bac-ovsdbserver-sb\") pod \"dnsmasq-dns-76fcf4b695-zd229\" (UID: \"ab6e0516-5a5b-45c3-9f57-f1181fd69bac\") " pod="openstack/dnsmasq-dns-76fcf4b695-zd229" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.924841 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ab6e0516-5a5b-45c3-9f57-f1181fd69bac-config\") pod \"dnsmasq-dns-76fcf4b695-zd229\" (UID: \"ab6e0516-5a5b-45c3-9f57-f1181fd69bac\") " pod="openstack/dnsmasq-dns-76fcf4b695-zd229" Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.925056 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-76fcf4b695-zd229"] Oct 02 11:14:03 crc kubenswrapper[4783]: I1002 11:14:03.982089 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Oct 02 11:14:04 crc kubenswrapper[4783]: I1002 11:14:04.026149 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c42c3ba-c130-4b8d-940a-9aa134629554-config-data\") pod \"placement-db-sync-l9zch\" (UID: \"6c42c3ba-c130-4b8d-940a-9aa134629554\") " pod="openstack/placement-db-sync-l9zch" Oct 02 11:14:04 crc kubenswrapper[4783]: I1002 11:14:04.027106 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ab6e0516-5a5b-45c3-9f57-f1181fd69bac-dns-swift-storage-0\") pod \"dnsmasq-dns-76fcf4b695-zd229\" (UID: \"ab6e0516-5a5b-45c3-9f57-f1181fd69bac\") " pod="openstack/dnsmasq-dns-76fcf4b695-zd229" Oct 02 11:14:04 crc kubenswrapper[4783]: I1002 11:14:04.027196 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c42c3ba-c130-4b8d-940a-9aa134629554-logs\") pod \"placement-db-sync-l9zch\" (UID: \"6c42c3ba-c130-4b8d-940a-9aa134629554\") " pod="openstack/placement-db-sync-l9zch" Oct 02 11:14:04 crc kubenswrapper[4783]: I1002 11:14:04.027291 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ab6e0516-5a5b-45c3-9f57-f1181fd69bac-ovsdbserver-nb\") pod \"dnsmasq-dns-76fcf4b695-zd229\" (UID: \"ab6e0516-5a5b-45c3-9f57-f1181fd69bac\") " pod="openstack/dnsmasq-dns-76fcf4b695-zd229" Oct 02 11:14:04 crc kubenswrapper[4783]: I1002 11:14:04.027358 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ab6e0516-5a5b-45c3-9f57-f1181fd69bac-dns-svc\") pod \"dnsmasq-dns-76fcf4b695-zd229\" (UID: \"ab6e0516-5a5b-45c3-9f57-f1181fd69bac\") " pod="openstack/dnsmasq-dns-76fcf4b695-zd229" Oct 02 11:14:04 crc kubenswrapper[4783]: I1002 11:14:04.027437 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ab6e0516-5a5b-45c3-9f57-f1181fd69bac-ovsdbserver-sb\") pod \"dnsmasq-dns-76fcf4b695-zd229\" (UID: \"ab6e0516-5a5b-45c3-9f57-f1181fd69bac\") " pod="openstack/dnsmasq-dns-76fcf4b695-zd229" Oct 02 11:14:04 crc kubenswrapper[4783]: I1002 11:14:04.027791 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c42c3ba-c130-4b8d-940a-9aa134629554-logs\") pod \"placement-db-sync-l9zch\" (UID: \"6c42c3ba-c130-4b8d-940a-9aa134629554\") " pod="openstack/placement-db-sync-l9zch" Oct 02 11:14:04 crc kubenswrapper[4783]: I1002 11:14:04.027890 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/aa156353-fc3a-4a52-b6cd-8b4a4ef1e898-scripts\") pod \"horizon-5fcdf54855-qfzfs\" (UID: \"aa156353-fc3a-4a52-b6cd-8b4a4ef1e898\") " pod="openstack/horizon-5fcdf54855-qfzfs" Oct 02 11:14:04 crc kubenswrapper[4783]: I1002 11:14:04.028142 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/aa156353-fc3a-4a52-b6cd-8b4a4ef1e898-horizon-secret-key\") pod \"horizon-5fcdf54855-qfzfs\" (UID: \"aa156353-fc3a-4a52-b6cd-8b4a4ef1e898\") " pod="openstack/horizon-5fcdf54855-qfzfs" Oct 02 11:14:04 crc kubenswrapper[4783]: I1002 11:14:04.028226 4783 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x5r6w\" (UniqueName: \"kubernetes.io/projected/aa156353-fc3a-4a52-b6cd-8b4a4ef1e898-kube-api-access-x5r6w\") pod \"horizon-5fcdf54855-qfzfs\" (UID: \"aa156353-fc3a-4a52-b6cd-8b4a4ef1e898\") " pod="openstack/horizon-5fcdf54855-qfzfs" Oct 02 11:14:04 crc kubenswrapper[4783]: I1002 11:14:04.028296 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ab6e0516-5a5b-45c3-9f57-f1181fd69bac-config\") pod \"dnsmasq-dns-76fcf4b695-zd229\" (UID: \"ab6e0516-5a5b-45c3-9f57-f1181fd69bac\") " pod="openstack/dnsmasq-dns-76fcf4b695-zd229" Oct 02 11:14:04 crc kubenswrapper[4783]: I1002 11:14:04.028457 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ab6e0516-5a5b-45c3-9f57-f1181fd69bac-ovsdbserver-nb\") pod \"dnsmasq-dns-76fcf4b695-zd229\" (UID: \"ab6e0516-5a5b-45c3-9f57-f1181fd69bac\") " pod="openstack/dnsmasq-dns-76fcf4b695-zd229" Oct 02 11:14:04 crc kubenswrapper[4783]: I1002 11:14:04.028471 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c42c3ba-c130-4b8d-940a-9aa134629554-scripts\") pod \"placement-db-sync-l9zch\" (UID: \"6c42c3ba-c130-4b8d-940a-9aa134629554\") " pod="openstack/placement-db-sync-l9zch" Oct 02 11:14:04 crc kubenswrapper[4783]: I1002 11:14:04.028018 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ab6e0516-5a5b-45c3-9f57-f1181fd69bac-dns-swift-storage-0\") pod \"dnsmasq-dns-76fcf4b695-zd229\" (UID: \"ab6e0516-5a5b-45c3-9f57-f1181fd69bac\") " pod="openstack/dnsmasq-dns-76fcf4b695-zd229" Oct 02 11:14:04 crc kubenswrapper[4783]: I1002 11:14:04.028534 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/aa156353-fc3a-4a52-b6cd-8b4a4ef1e898-logs\") pod \"horizon-5fcdf54855-qfzfs\" (UID: \"aa156353-fc3a-4a52-b6cd-8b4a4ef1e898\") " pod="openstack/horizon-5fcdf54855-qfzfs" Oct 02 11:14:04 crc kubenswrapper[4783]: I1002 11:14:04.028603 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ab6e0516-5a5b-45c3-9f57-f1181fd69bac-dns-svc\") pod \"dnsmasq-dns-76fcf4b695-zd229\" (UID: \"ab6e0516-5a5b-45c3-9f57-f1181fd69bac\") " pod="openstack/dnsmasq-dns-76fcf4b695-zd229" Oct 02 11:14:04 crc kubenswrapper[4783]: I1002 11:14:04.028611 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6fn5j\" (UniqueName: \"kubernetes.io/projected/6c42c3ba-c130-4b8d-940a-9aa134629554-kube-api-access-6fn5j\") pod \"placement-db-sync-l9zch\" (UID: \"6c42c3ba-c130-4b8d-940a-9aa134629554\") " pod="openstack/placement-db-sync-l9zch" Oct 02 11:14:04 crc kubenswrapper[4783]: I1002 11:14:04.028656 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/aa156353-fc3a-4a52-b6cd-8b4a4ef1e898-config-data\") pod \"horizon-5fcdf54855-qfzfs\" (UID: \"aa156353-fc3a-4a52-b6cd-8b4a4ef1e898\") " pod="openstack/horizon-5fcdf54855-qfzfs" Oct 02 11:14:04 crc kubenswrapper[4783]: I1002 11:14:04.028710 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-st9j9\" (UniqueName: \"kubernetes.io/projected/ab6e0516-5a5b-45c3-9f57-f1181fd69bac-kube-api-access-st9j9\") pod \"dnsmasq-dns-76fcf4b695-zd229\" (UID: \"ab6e0516-5a5b-45c3-9f57-f1181fd69bac\") " pod="openstack/dnsmasq-dns-76fcf4b695-zd229" Oct 02 11:14:04 crc kubenswrapper[4783]: I1002 11:14:04.028756 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c42c3ba-c130-4b8d-940a-9aa134629554-combined-ca-bundle\") pod \"placement-db-sync-l9zch\" (UID: \"6c42c3ba-c130-4b8d-940a-9aa134629554\") " pod="openstack/placement-db-sync-l9zch" Oct 02 11:14:04 crc kubenswrapper[4783]: I1002 11:14:04.034801 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ab6e0516-5a5b-45c3-9f57-f1181fd69bac-config\") pod \"dnsmasq-dns-76fcf4b695-zd229\" (UID: \"ab6e0516-5a5b-45c3-9f57-f1181fd69bac\") " pod="openstack/dnsmasq-dns-76fcf4b695-zd229" Oct 02 11:14:04 crc kubenswrapper[4783]: I1002 11:14:04.038285 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c42c3ba-c130-4b8d-940a-9aa134629554-config-data\") pod \"placement-db-sync-l9zch\" (UID: \"6c42c3ba-c130-4b8d-940a-9aa134629554\") " pod="openstack/placement-db-sync-l9zch" Oct 02 11:14:04 crc kubenswrapper[4783]: I1002 11:14:04.043575 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c42c3ba-c130-4b8d-940a-9aa134629554-combined-ca-bundle\") pod \"placement-db-sync-l9zch\" (UID: \"6c42c3ba-c130-4b8d-940a-9aa134629554\") " pod="openstack/placement-db-sync-l9zch" Oct 02 11:14:04 crc kubenswrapper[4783]: I1002 11:14:04.049252 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-st9j9\" (UniqueName: \"kubernetes.io/projected/ab6e0516-5a5b-45c3-9f57-f1181fd69bac-kube-api-access-st9j9\") pod \"dnsmasq-dns-76fcf4b695-zd229\" (UID: \"ab6e0516-5a5b-45c3-9f57-f1181fd69bac\") " pod="openstack/dnsmasq-dns-76fcf4b695-zd229" Oct 02 11:14:04 crc kubenswrapper[4783]: I1002 11:14:04.050274 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ab6e0516-5a5b-45c3-9f57-f1181fd69bac-ovsdbserver-sb\") pod \"dnsmasq-dns-76fcf4b695-zd229\" (UID: \"ab6e0516-5a5b-45c3-9f57-f1181fd69bac\") " pod="openstack/dnsmasq-dns-76fcf4b695-zd229" Oct 02 11:14:04 crc kubenswrapper[4783]: I1002 11:14:04.054908 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c42c3ba-c130-4b8d-940a-9aa134629554-scripts\") pod \"placement-db-sync-l9zch\" (UID: \"6c42c3ba-c130-4b8d-940a-9aa134629554\") " pod="openstack/placement-db-sync-l9zch" Oct 02 11:14:04 crc kubenswrapper[4783]: I1002 11:14:04.066592 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6fn5j\" (UniqueName: \"kubernetes.io/projected/6c42c3ba-c130-4b8d-940a-9aa134629554-kube-api-access-6fn5j\") pod \"placement-db-sync-l9zch\" (UID: \"6c42c3ba-c130-4b8d-940a-9aa134629554\") " pod="openstack/placement-db-sync-l9zch" Oct 02 11:14:04 crc kubenswrapper[4783]: I1002 11:14:04.136870 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/aa156353-fc3a-4a52-b6cd-8b4a4ef1e898-scripts\") pod \"horizon-5fcdf54855-qfzfs\" (UID: 
\"aa156353-fc3a-4a52-b6cd-8b4a4ef1e898\") " pod="openstack/horizon-5fcdf54855-qfzfs" Oct 02 11:14:04 crc kubenswrapper[4783]: I1002 11:14:04.137230 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/aa156353-fc3a-4a52-b6cd-8b4a4ef1e898-horizon-secret-key\") pod \"horizon-5fcdf54855-qfzfs\" (UID: \"aa156353-fc3a-4a52-b6cd-8b4a4ef1e898\") " pod="openstack/horizon-5fcdf54855-qfzfs" Oct 02 11:14:04 crc kubenswrapper[4783]: I1002 11:14:04.137268 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x5r6w\" (UniqueName: \"kubernetes.io/projected/aa156353-fc3a-4a52-b6cd-8b4a4ef1e898-kube-api-access-x5r6w\") pod \"horizon-5fcdf54855-qfzfs\" (UID: \"aa156353-fc3a-4a52-b6cd-8b4a4ef1e898\") " pod="openstack/horizon-5fcdf54855-qfzfs" Oct 02 11:14:04 crc kubenswrapper[4783]: I1002 11:14:04.137351 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/aa156353-fc3a-4a52-b6cd-8b4a4ef1e898-logs\") pod \"horizon-5fcdf54855-qfzfs\" (UID: \"aa156353-fc3a-4a52-b6cd-8b4a4ef1e898\") " pod="openstack/horizon-5fcdf54855-qfzfs" Oct 02 11:14:04 crc kubenswrapper[4783]: I1002 11:14:04.137455 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/aa156353-fc3a-4a52-b6cd-8b4a4ef1e898-config-data\") pod \"horizon-5fcdf54855-qfzfs\" (UID: \"aa156353-fc3a-4a52-b6cd-8b4a4ef1e898\") " pod="openstack/horizon-5fcdf54855-qfzfs" Oct 02 11:14:04 crc kubenswrapper[4783]: I1002 11:14:04.137874 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-l9zch" Oct 02 11:14:04 crc kubenswrapper[4783]: I1002 11:14:04.138261 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/aa156353-fc3a-4a52-b6cd-8b4a4ef1e898-scripts\") pod \"horizon-5fcdf54855-qfzfs\" (UID: \"aa156353-fc3a-4a52-b6cd-8b4a4ef1e898\") " pod="openstack/horizon-5fcdf54855-qfzfs" Oct 02 11:14:04 crc kubenswrapper[4783]: I1002 11:14:04.138814 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/aa156353-fc3a-4a52-b6cd-8b4a4ef1e898-logs\") pod \"horizon-5fcdf54855-qfzfs\" (UID: \"aa156353-fc3a-4a52-b6cd-8b4a4ef1e898\") " pod="openstack/horizon-5fcdf54855-qfzfs" Oct 02 11:14:04 crc kubenswrapper[4783]: I1002 11:14:04.139800 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/aa156353-fc3a-4a52-b6cd-8b4a4ef1e898-config-data\") pod \"horizon-5fcdf54855-qfzfs\" (UID: \"aa156353-fc3a-4a52-b6cd-8b4a4ef1e898\") " pod="openstack/horizon-5fcdf54855-qfzfs" Oct 02 11:14:04 crc kubenswrapper[4783]: I1002 11:14:04.150925 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/aa156353-fc3a-4a52-b6cd-8b4a4ef1e898-horizon-secret-key\") pod \"horizon-5fcdf54855-qfzfs\" (UID: \"aa156353-fc3a-4a52-b6cd-8b4a4ef1e898\") " pod="openstack/horizon-5fcdf54855-qfzfs" Oct 02 11:14:04 crc kubenswrapper[4783]: I1002 11:14:04.167032 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x5r6w\" (UniqueName: \"kubernetes.io/projected/aa156353-fc3a-4a52-b6cd-8b4a4ef1e898-kube-api-access-x5r6w\") pod \"horizon-5fcdf54855-qfzfs\" (UID: 
\"aa156353-fc3a-4a52-b6cd-8b4a4ef1e898\") " pod="openstack/horizon-5fcdf54855-qfzfs" Oct 02 11:14:04 crc kubenswrapper[4783]: I1002 11:14:04.205769 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-g9szh" Oct 02 11:14:04 crc kubenswrapper[4783]: I1002 11:14:04.210652 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-76fcf4b695-zd229" Oct 02 11:14:04 crc kubenswrapper[4783]: I1002 11:14:04.217846 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5fcdf54855-qfzfs" Oct 02 11:14:04 crc kubenswrapper[4783]: I1002 11:14:04.350822 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-h682n"] Oct 02 11:14:04 crc kubenswrapper[4783]: I1002 11:14:04.363943 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55fff446b9-mv8nq"] Oct 02 11:14:04 crc kubenswrapper[4783]: W1002 11:14:04.395924 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1b19ba47_6de0_4bc7_9e0a_6fb5ea3d8cbf.slice/crio-589298b17dc5881733c034323175d5504c9e62ecf1b3ecd5c2ff992673bc78df WatchSource:0}: Error finding container 589298b17dc5881733c034323175d5504c9e62ecf1b3ecd5c2ff992673bc78df: Status 404 returned error can't find the container with id 589298b17dc5881733c034323175d5504c9e62ecf1b3ecd5c2ff992673bc78df Oct 02 11:14:04 crc kubenswrapper[4783]: I1002 11:14:04.719145 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-sqsnf"] Oct 02 11:14:04 crc kubenswrapper[4783]: I1002 11:14:04.830820 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55fff446b9-mv8nq" event={"ID":"1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf","Type":"ContainerStarted","Data":"589298b17dc5881733c034323175d5504c9e62ecf1b3ecd5c2ff992673bc78df"} Oct 02 11:14:04 crc kubenswrapper[4783]: I1002 11:14:04.841699 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-h682n" event={"ID":"c91175c5-8c5b-4e52-aeb1-6a6f181803a7","Type":"ContainerStarted","Data":"4be1fdcd2bc43564343385e369d13cc05490fd2d6054e16357c6640af6ac45dd"} Oct 02 11:14:05 crc kubenswrapper[4783]: I1002 11:14:05.012034 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-bh7g4"] Oct 02 11:14:05 crc kubenswrapper[4783]: I1002 11:14:05.047788 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 02 11:14:05 crc kubenswrapper[4783]: I1002 11:14:05.064669 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-547b75bc85-glzvn"] Oct 02 11:14:05 crc kubenswrapper[4783]: W1002 11:14:05.065114 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod93f15260_265f_4b6f_96c9_f1beb4556c59.slice/crio-20eb04823c03896b1c62593a8b99afaf28f1cefd09c80d23d1303516308cbff6 WatchSource:0}: Error finding container 20eb04823c03896b1c62593a8b99afaf28f1cefd09c80d23d1303516308cbff6: Status 404 returned error can't find the container with id 20eb04823c03896b1c62593a8b99afaf28f1cefd09c80d23d1303516308cbff6 Oct 02 11:14:05 crc kubenswrapper[4783]: I1002 11:14:05.145007 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-l9zch"] Oct 02 11:14:05 crc kubenswrapper[4783]: I1002 11:14:05.292192 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/dnsmasq-dns-76fcf4b695-zd229"] Oct 02 11:14:05 crc kubenswrapper[4783]: I1002 11:14:05.318109 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5fcdf54855-qfzfs"] Oct 02 11:14:05 crc kubenswrapper[4783]: I1002 11:14:05.324959 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-g9szh"] Oct 02 11:14:05 crc kubenswrapper[4783]: E1002 11:14:05.739145 4783 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1b19ba47_6de0_4bc7_9e0a_6fb5ea3d8cbf.slice/crio-77716b5b134745045ef13a324921d7998f3d64a0e1ddc9c1b3df46f58e78e2d1.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1b19ba47_6de0_4bc7_9e0a_6fb5ea3d8cbf.slice/crio-conmon-77716b5b134745045ef13a324921d7998f3d64a0e1ddc9c1b3df46f58e78e2d1.scope\": RecentStats: unable to find data in memory cache]" Oct 02 11:14:05 crc kubenswrapper[4783]: I1002 11:14:05.890658 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5fcdf54855-qfzfs" event={"ID":"aa156353-fc3a-4a52-b6cd-8b4a4ef1e898","Type":"ContainerStarted","Data":"90511279d6f6a8dea9511165cc6732b44daf32b2de9b9815f180f79c2e522579"} Oct 02 11:14:05 crc kubenswrapper[4783]: I1002 11:14:05.897592 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-sqsnf" event={"ID":"9e4fb56d-2565-4383-a883-a0c1eae40cb4","Type":"ContainerStarted","Data":"40f54dcf67c75a81b071081b86e4f29463caca99f8439de48b5c967ed2c6db53"} Oct 02 11:14:05 crc kubenswrapper[4783]: I1002 11:14:05.903124 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-g9szh" event={"ID":"68c31bf9-b59a-43ed-bb74-9e6cc0bce703","Type":"ContainerStarted","Data":"666d6ee25b993c942fe5560cfaf5049cc4abdb77d5ba838471629d093c775eca"} Oct 02 11:14:05 crc kubenswrapper[4783]: I1002 11:14:05.908766 4783 generic.go:334] "Generic (PLEG): container finished" podID="1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf" containerID="77716b5b134745045ef13a324921d7998f3d64a0e1ddc9c1b3df46f58e78e2d1" exitCode=0 Oct 02 11:14:05 crc kubenswrapper[4783]: I1002 11:14:05.908931 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55fff446b9-mv8nq" event={"ID":"1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf","Type":"ContainerDied","Data":"77716b5b134745045ef13a324921d7998f3d64a0e1ddc9c1b3df46f58e78e2d1"} Oct 02 11:14:05 crc kubenswrapper[4783]: I1002 11:14:05.932073 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-h682n" event={"ID":"c91175c5-8c5b-4e52-aeb1-6a6f181803a7","Type":"ContainerStarted","Data":"4266001ae8854435bf1bb79107b79c61b25ece67b50254a237dc0f2dba2d06bf"} Oct 02 11:14:05 crc kubenswrapper[4783]: I1002 11:14:05.956876 4783 generic.go:334] "Generic (PLEG): container finished" podID="ab6e0516-5a5b-45c3-9f57-f1181fd69bac" containerID="cf943614091729b07abe51e9632bbf7489fc0b8ded142b3fb65b60c4b0112d8c" exitCode=0 Oct 02 11:14:05 crc kubenswrapper[4783]: I1002 11:14:05.956964 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76fcf4b695-zd229" event={"ID":"ab6e0516-5a5b-45c3-9f57-f1181fd69bac","Type":"ContainerDied","Data":"cf943614091729b07abe51e9632bbf7489fc0b8ded142b3fb65b60c4b0112d8c"} Oct 02 11:14:05 crc kubenswrapper[4783]: I1002 11:14:05.956994 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/dnsmasq-dns-76fcf4b695-zd229" event={"ID":"ab6e0516-5a5b-45c3-9f57-f1181fd69bac","Type":"ContainerStarted","Data":"4c667ed5e1b4d8e45b9e0a393110b53926820953d7c1036fd41db7fd34c1a981"} Oct 02 11:14:05 crc kubenswrapper[4783]: I1002 11:14:05.979477 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-h682n" podStartSLOduration=3.979461133 podStartE2EDuration="3.979461133s" podCreationTimestamp="2025-10-02 11:14:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:14:05.974913139 +0000 UTC m=+1279.291107400" watchObservedRunningTime="2025-10-02 11:14:05.979461133 +0000 UTC m=+1279.295655394" Oct 02 11:14:05 crc kubenswrapper[4783]: I1002 11:14:05.995852 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"193c55b5-888f-4738-b8f6-c075d2b396a5","Type":"ContainerStarted","Data":"91e56996ca82437763b8805557846cda31c00af7c1199b0f5ea4eac443f36f0b"} Oct 02 11:14:06 crc kubenswrapper[4783]: I1002 11:14:06.012597 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-547b75bc85-glzvn"] Oct 02 11:14:06 crc kubenswrapper[4783]: I1002 11:14:06.029679 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-l9zch" event={"ID":"6c42c3ba-c130-4b8d-940a-9aa134629554","Type":"ContainerStarted","Data":"ef08a280052523ad2bdcf6eaf8420f429605f264aca3f5922e59ebc4573e9c6b"} Oct 02 11:14:06 crc kubenswrapper[4783]: I1002 11:14:06.069737 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-bh7g4" event={"ID":"479a79a2-f65b-443b-865a-bec4c138b978","Type":"ContainerStarted","Data":"31ba9fc88407b90063a3b07e92a4f172cd98c0503845642ff22552a9d5ec1cdf"} Oct 02 11:14:06 crc kubenswrapper[4783]: I1002 11:14:06.069785 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-bh7g4" event={"ID":"479a79a2-f65b-443b-865a-bec4c138b978","Type":"ContainerStarted","Data":"170b4f835207ff2aa61e9dcb981ed0b184ac7de2ea476faed698a38b3f43232a"} Oct 02 11:14:06 crc kubenswrapper[4783]: I1002 11:14:06.086976 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-547b75bc85-glzvn" event={"ID":"93f15260-265f-4b6f-96c9-f1beb4556c59","Type":"ContainerStarted","Data":"20eb04823c03896b1c62593a8b99afaf28f1cefd09c80d23d1303516308cbff6"} Oct 02 11:14:06 crc kubenswrapper[4783]: I1002 11:14:06.115504 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-84f9c985fc-t6l7c"] Oct 02 11:14:06 crc kubenswrapper[4783]: I1002 11:14:06.124151 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-84f9c985fc-t6l7c" Oct 02 11:14:06 crc kubenswrapper[4783]: I1002 11:14:06.148490 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 02 11:14:06 crc kubenswrapper[4783]: I1002 11:14:06.167097 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-bh7g4" podStartSLOduration=3.167078465 podStartE2EDuration="3.167078465s" podCreationTimestamp="2025-10-02 11:14:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:14:06.109292207 +0000 UTC m=+1279.425486468" watchObservedRunningTime="2025-10-02 11:14:06.167078465 +0000 UTC m=+1279.483272726" Oct 02 11:14:06 crc kubenswrapper[4783]: I1002 11:14:06.180383 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-84f9c985fc-t6l7c"] Oct 02 11:14:06 crc kubenswrapper[4783]: I1002 11:14:06.221006 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/61fe47fc-c4a7-4f02-8629-04abe2e0eb47-config-data\") pod \"horizon-84f9c985fc-t6l7c\" (UID: \"61fe47fc-c4a7-4f02-8629-04abe2e0eb47\") " pod="openstack/horizon-84f9c985fc-t6l7c" Oct 02 11:14:06 crc kubenswrapper[4783]: I1002 11:14:06.221072 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/61fe47fc-c4a7-4f02-8629-04abe2e0eb47-horizon-secret-key\") pod \"horizon-84f9c985fc-t6l7c\" (UID: \"61fe47fc-c4a7-4f02-8629-04abe2e0eb47\") " pod="openstack/horizon-84f9c985fc-t6l7c" Oct 02 11:14:06 crc kubenswrapper[4783]: I1002 11:14:06.221131 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/61fe47fc-c4a7-4f02-8629-04abe2e0eb47-scripts\") pod \"horizon-84f9c985fc-t6l7c\" (UID: \"61fe47fc-c4a7-4f02-8629-04abe2e0eb47\") " pod="openstack/horizon-84f9c985fc-t6l7c" Oct 02 11:14:06 crc kubenswrapper[4783]: I1002 11:14:06.221160 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d65jt\" (UniqueName: \"kubernetes.io/projected/61fe47fc-c4a7-4f02-8629-04abe2e0eb47-kube-api-access-d65jt\") pod \"horizon-84f9c985fc-t6l7c\" (UID: \"61fe47fc-c4a7-4f02-8629-04abe2e0eb47\") " pod="openstack/horizon-84f9c985fc-t6l7c" Oct 02 11:14:06 crc kubenswrapper[4783]: I1002 11:14:06.221176 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/61fe47fc-c4a7-4f02-8629-04abe2e0eb47-logs\") pod \"horizon-84f9c985fc-t6l7c\" (UID: \"61fe47fc-c4a7-4f02-8629-04abe2e0eb47\") " pod="openstack/horizon-84f9c985fc-t6l7c" Oct 02 11:14:06 crc kubenswrapper[4783]: I1002 11:14:06.326350 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/61fe47fc-c4a7-4f02-8629-04abe2e0eb47-config-data\") pod \"horizon-84f9c985fc-t6l7c\" (UID: \"61fe47fc-c4a7-4f02-8629-04abe2e0eb47\") " pod="openstack/horizon-84f9c985fc-t6l7c" Oct 02 11:14:06 crc kubenswrapper[4783]: I1002 11:14:06.326471 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/61fe47fc-c4a7-4f02-8629-04abe2e0eb47-horizon-secret-key\") 
pod \"horizon-84f9c985fc-t6l7c\" (UID: \"61fe47fc-c4a7-4f02-8629-04abe2e0eb47\") " pod="openstack/horizon-84f9c985fc-t6l7c" Oct 02 11:14:06 crc kubenswrapper[4783]: I1002 11:14:06.326551 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/61fe47fc-c4a7-4f02-8629-04abe2e0eb47-scripts\") pod \"horizon-84f9c985fc-t6l7c\" (UID: \"61fe47fc-c4a7-4f02-8629-04abe2e0eb47\") " pod="openstack/horizon-84f9c985fc-t6l7c" Oct 02 11:14:06 crc kubenswrapper[4783]: I1002 11:14:06.326583 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d65jt\" (UniqueName: \"kubernetes.io/projected/61fe47fc-c4a7-4f02-8629-04abe2e0eb47-kube-api-access-d65jt\") pod \"horizon-84f9c985fc-t6l7c\" (UID: \"61fe47fc-c4a7-4f02-8629-04abe2e0eb47\") " pod="openstack/horizon-84f9c985fc-t6l7c" Oct 02 11:14:06 crc kubenswrapper[4783]: I1002 11:14:06.326603 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/61fe47fc-c4a7-4f02-8629-04abe2e0eb47-logs\") pod \"horizon-84f9c985fc-t6l7c\" (UID: \"61fe47fc-c4a7-4f02-8629-04abe2e0eb47\") " pod="openstack/horizon-84f9c985fc-t6l7c" Oct 02 11:14:06 crc kubenswrapper[4783]: I1002 11:14:06.327042 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/61fe47fc-c4a7-4f02-8629-04abe2e0eb47-logs\") pod \"horizon-84f9c985fc-t6l7c\" (UID: \"61fe47fc-c4a7-4f02-8629-04abe2e0eb47\") " pod="openstack/horizon-84f9c985fc-t6l7c" Oct 02 11:14:06 crc kubenswrapper[4783]: I1002 11:14:06.328253 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/61fe47fc-c4a7-4f02-8629-04abe2e0eb47-config-data\") pod \"horizon-84f9c985fc-t6l7c\" (UID: \"61fe47fc-c4a7-4f02-8629-04abe2e0eb47\") " pod="openstack/horizon-84f9c985fc-t6l7c" Oct 02 11:14:06 crc kubenswrapper[4783]: I1002 11:14:06.329737 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/61fe47fc-c4a7-4f02-8629-04abe2e0eb47-scripts\") pod \"horizon-84f9c985fc-t6l7c\" (UID: \"61fe47fc-c4a7-4f02-8629-04abe2e0eb47\") " pod="openstack/horizon-84f9c985fc-t6l7c" Oct 02 11:14:06 crc kubenswrapper[4783]: I1002 11:14:06.344124 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/61fe47fc-c4a7-4f02-8629-04abe2e0eb47-horizon-secret-key\") pod \"horizon-84f9c985fc-t6l7c\" (UID: \"61fe47fc-c4a7-4f02-8629-04abe2e0eb47\") " pod="openstack/horizon-84f9c985fc-t6l7c" Oct 02 11:14:06 crc kubenswrapper[4783]: I1002 11:14:06.358691 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d65jt\" (UniqueName: \"kubernetes.io/projected/61fe47fc-c4a7-4f02-8629-04abe2e0eb47-kube-api-access-d65jt\") pod \"horizon-84f9c985fc-t6l7c\" (UID: \"61fe47fc-c4a7-4f02-8629-04abe2e0eb47\") " pod="openstack/horizon-84f9c985fc-t6l7c" Oct 02 11:14:06 crc kubenswrapper[4783]: I1002 11:14:06.537953 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-84f9c985fc-t6l7c" Oct 02 11:14:06 crc kubenswrapper[4783]: I1002 11:14:06.670058 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-55fff446b9-mv8nq" Oct 02 11:14:06 crc kubenswrapper[4783]: I1002 11:14:06.762773 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f496b\" (UniqueName: \"kubernetes.io/projected/1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf-kube-api-access-f496b\") pod \"1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf\" (UID: \"1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf\") " Oct 02 11:14:06 crc kubenswrapper[4783]: I1002 11:14:06.762863 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf-config\") pod \"1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf\" (UID: \"1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf\") " Oct 02 11:14:06 crc kubenswrapper[4783]: I1002 11:14:06.762883 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf-ovsdbserver-nb\") pod \"1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf\" (UID: \"1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf\") " Oct 02 11:14:06 crc kubenswrapper[4783]: I1002 11:14:06.762996 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf-dns-svc\") pod \"1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf\" (UID: \"1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf\") " Oct 02 11:14:06 crc kubenswrapper[4783]: I1002 11:14:06.763018 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf-dns-swift-storage-0\") pod \"1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf\" (UID: \"1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf\") " Oct 02 11:14:06 crc kubenswrapper[4783]: I1002 11:14:06.763111 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf-ovsdbserver-sb\") pod \"1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf\" (UID: \"1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf\") " Oct 02 11:14:06 crc kubenswrapper[4783]: I1002 11:14:06.772475 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf-kube-api-access-f496b" (OuterVolumeSpecName: "kube-api-access-f496b") pod "1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf" (UID: "1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf"). InnerVolumeSpecName "kube-api-access-f496b". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:14:06 crc kubenswrapper[4783]: I1002 11:14:06.797265 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf" (UID: "1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:14:06 crc kubenswrapper[4783]: I1002 11:14:06.805681 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf" (UID: "1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:14:06 crc kubenswrapper[4783]: I1002 11:14:06.814061 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf" (UID: "1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:14:06 crc kubenswrapper[4783]: E1002 11:14:06.822116 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf-config podName:1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf nodeName:}" failed. No retries permitted until 2025-10-02 11:14:07.322086575 +0000 UTC m=+1280.638280836 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "config" (UniqueName: "kubernetes.io/configmap/1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf-config") pod "1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf" (UID: "1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf") : error deleting /var/lib/kubelet/pods/1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf/volume-subpaths: remove /var/lib/kubelet/pods/1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf/volume-subpaths: no such file or directory Oct 02 11:14:06 crc kubenswrapper[4783]: I1002 11:14:06.822386 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf" (UID: "1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:14:06 crc kubenswrapper[4783]: I1002 11:14:06.864875 4783 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 02 11:14:06 crc kubenswrapper[4783]: I1002 11:14:06.864917 4783 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Oct 02 11:14:06 crc kubenswrapper[4783]: I1002 11:14:06.864933 4783 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 02 11:14:06 crc kubenswrapper[4783]: I1002 11:14:06.864945 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f496b\" (UniqueName: \"kubernetes.io/projected/1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf-kube-api-access-f496b\") on node \"crc\" DevicePath \"\"" Oct 02 11:14:06 crc kubenswrapper[4783]: I1002 11:14:06.864956 4783 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 02 11:14:07 crc kubenswrapper[4783]: I1002 11:14:07.106302 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76fcf4b695-zd229" event={"ID":"ab6e0516-5a5b-45c3-9f57-f1181fd69bac","Type":"ContainerStarted","Data":"99b9b24e483e972c6d5666b93e2b81580646ed22dc0c672dce56a348447713b1"} Oct 02 11:14:07 crc kubenswrapper[4783]: I1002 11:14:07.106756 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack/dnsmasq-dns-76fcf4b695-zd229" Oct 02 11:14:07 crc kubenswrapper[4783]: I1002 11:14:07.121704 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-55fff446b9-mv8nq" Oct 02 11:14:07 crc kubenswrapper[4783]: I1002 11:14:07.122042 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-55fff446b9-mv8nq" event={"ID":"1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf","Type":"ContainerDied","Data":"589298b17dc5881733c034323175d5504c9e62ecf1b3ecd5c2ff992673bc78df"} Oct 02 11:14:07 crc kubenswrapper[4783]: I1002 11:14:07.122082 4783 scope.go:117] "RemoveContainer" containerID="77716b5b134745045ef13a324921d7998f3d64a0e1ddc9c1b3df46f58e78e2d1" Oct 02 11:14:07 crc kubenswrapper[4783]: I1002 11:14:07.129732 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-84f9c985fc-t6l7c"] Oct 02 11:14:07 crc kubenswrapper[4783]: I1002 11:14:07.159226 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-76fcf4b695-zd229" podStartSLOduration=4.159204418 podStartE2EDuration="4.159204418s" podCreationTimestamp="2025-10-02 11:14:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:14:07.151775625 +0000 UTC m=+1280.467969886" watchObservedRunningTime="2025-10-02 11:14:07.159204418 +0000 UTC m=+1280.475398699" Oct 02 11:14:07 crc kubenswrapper[4783]: W1002 11:14:07.190060 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod61fe47fc_c4a7_4f02_8629_04abe2e0eb47.slice/crio-e6cd161be7d4147e4fbf4e0235ecae4290278648cf7477bf5961c3dc36154d70 WatchSource:0}: Error finding container e6cd161be7d4147e4fbf4e0235ecae4290278648cf7477bf5961c3dc36154d70: Status 404 returned error can't find the container with id e6cd161be7d4147e4fbf4e0235ecae4290278648cf7477bf5961c3dc36154d70 Oct 02 11:14:07 crc kubenswrapper[4783]: I1002 11:14:07.377097 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf-config\") pod \"1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf\" (UID: \"1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf\") " Oct 02 11:14:07 crc kubenswrapper[4783]: I1002 11:14:07.377533 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf-config" (OuterVolumeSpecName: "config") pod "1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf" (UID: "1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:14:07 crc kubenswrapper[4783]: I1002 11:14:07.377839 4783 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf-config\") on node \"crc\" DevicePath \"\"" Oct 02 11:14:07 crc kubenswrapper[4783]: I1002 11:14:07.489190 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-55fff446b9-mv8nq"] Oct 02 11:14:07 crc kubenswrapper[4783]: I1002 11:14:07.494630 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-55fff446b9-mv8nq"] Oct 02 11:14:07 crc kubenswrapper[4783]: I1002 11:14:07.556554 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf" path="/var/lib/kubelet/pods/1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf/volumes" Oct 02 11:14:08 crc kubenswrapper[4783]: I1002 11:14:08.141003 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-84f9c985fc-t6l7c" event={"ID":"61fe47fc-c4a7-4f02-8629-04abe2e0eb47","Type":"ContainerStarted","Data":"e6cd161be7d4147e4fbf4e0235ecae4290278648cf7477bf5961c3dc36154d70"} Oct 02 11:14:12 crc kubenswrapper[4783]: I1002 11:14:12.617650 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5fcdf54855-qfzfs"] Oct 02 11:14:12 crc kubenswrapper[4783]: I1002 11:14:12.648263 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-5fcdf587dd-wvthh"] Oct 02 11:14:12 crc kubenswrapper[4783]: E1002 11:14:12.648631 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf" containerName="init" Oct 02 11:14:12 crc kubenswrapper[4783]: I1002 11:14:12.648649 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf" containerName="init" Oct 02 11:14:12 crc kubenswrapper[4783]: I1002 11:14:12.648823 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b19ba47-6de0-4bc7-9e0a-6fb5ea3d8cbf" containerName="init" Oct 02 11:14:12 crc kubenswrapper[4783]: I1002 11:14:12.649681 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-5fcdf587dd-wvthh" Oct 02 11:14:12 crc kubenswrapper[4783]: I1002 11:14:12.653536 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-horizon-svc" Oct 02 11:14:12 crc kubenswrapper[4783]: I1002 11:14:12.666741 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5fcdf587dd-wvthh"] Oct 02 11:14:12 crc kubenswrapper[4783]: I1002 11:14:12.709152 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-llrd6\" (UniqueName: \"kubernetes.io/projected/8abc2e1e-de94-4880-8a75-0c7ee0a2cdba-kube-api-access-llrd6\") pod \"horizon-5fcdf587dd-wvthh\" (UID: \"8abc2e1e-de94-4880-8a75-0c7ee0a2cdba\") " pod="openstack/horizon-5fcdf587dd-wvthh" Oct 02 11:14:12 crc kubenswrapper[4783]: I1002 11:14:12.709213 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/8abc2e1e-de94-4880-8a75-0c7ee0a2cdba-horizon-tls-certs\") pod \"horizon-5fcdf587dd-wvthh\" (UID: \"8abc2e1e-de94-4880-8a75-0c7ee0a2cdba\") " pod="openstack/horizon-5fcdf587dd-wvthh" Oct 02 11:14:12 crc kubenswrapper[4783]: I1002 11:14:12.709261 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/8abc2e1e-de94-4880-8a75-0c7ee0a2cdba-horizon-secret-key\") pod \"horizon-5fcdf587dd-wvthh\" (UID: \"8abc2e1e-de94-4880-8a75-0c7ee0a2cdba\") " pod="openstack/horizon-5fcdf587dd-wvthh" Oct 02 11:14:12 crc kubenswrapper[4783]: I1002 11:14:12.709285 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8abc2e1e-de94-4880-8a75-0c7ee0a2cdba-logs\") pod \"horizon-5fcdf587dd-wvthh\" (UID: \"8abc2e1e-de94-4880-8a75-0c7ee0a2cdba\") " pod="openstack/horizon-5fcdf587dd-wvthh" Oct 02 11:14:12 crc kubenswrapper[4783]: I1002 11:14:12.709370 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8abc2e1e-de94-4880-8a75-0c7ee0a2cdba-combined-ca-bundle\") pod \"horizon-5fcdf587dd-wvthh\" (UID: \"8abc2e1e-de94-4880-8a75-0c7ee0a2cdba\") " pod="openstack/horizon-5fcdf587dd-wvthh" Oct 02 11:14:12 crc kubenswrapper[4783]: I1002 11:14:12.709392 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8abc2e1e-de94-4880-8a75-0c7ee0a2cdba-scripts\") pod \"horizon-5fcdf587dd-wvthh\" (UID: \"8abc2e1e-de94-4880-8a75-0c7ee0a2cdba\") " pod="openstack/horizon-5fcdf587dd-wvthh" Oct 02 11:14:12 crc kubenswrapper[4783]: I1002 11:14:12.709427 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8abc2e1e-de94-4880-8a75-0c7ee0a2cdba-config-data\") pod \"horizon-5fcdf587dd-wvthh\" (UID: \"8abc2e1e-de94-4880-8a75-0c7ee0a2cdba\") " pod="openstack/horizon-5fcdf587dd-wvthh" Oct 02 11:14:12 crc kubenswrapper[4783]: I1002 11:14:12.726654 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-84f9c985fc-t6l7c"] Oct 02 11:14:12 crc kubenswrapper[4783]: I1002 11:14:12.788702 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/horizon-567b57d86d-gv6fq"] Oct 02 11:14:12 crc kubenswrapper[4783]: I1002 11:14:12.790120 
4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-567b57d86d-gv6fq" Oct 02 11:14:12 crc kubenswrapper[4783]: I1002 11:14:12.813138 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8abc2e1e-de94-4880-8a75-0c7ee0a2cdba-combined-ca-bundle\") pod \"horizon-5fcdf587dd-wvthh\" (UID: \"8abc2e1e-de94-4880-8a75-0c7ee0a2cdba\") " pod="openstack/horizon-5fcdf587dd-wvthh" Oct 02 11:14:12 crc kubenswrapper[4783]: I1002 11:14:12.813197 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8abc2e1e-de94-4880-8a75-0c7ee0a2cdba-scripts\") pod \"horizon-5fcdf587dd-wvthh\" (UID: \"8abc2e1e-de94-4880-8a75-0c7ee0a2cdba\") " pod="openstack/horizon-5fcdf587dd-wvthh" Oct 02 11:14:12 crc kubenswrapper[4783]: I1002 11:14:12.813227 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8abc2e1e-de94-4880-8a75-0c7ee0a2cdba-config-data\") pod \"horizon-5fcdf587dd-wvthh\" (UID: \"8abc2e1e-de94-4880-8a75-0c7ee0a2cdba\") " pod="openstack/horizon-5fcdf587dd-wvthh" Oct 02 11:14:12 crc kubenswrapper[4783]: I1002 11:14:12.813252 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-llrd6\" (UniqueName: \"kubernetes.io/projected/8abc2e1e-de94-4880-8a75-0c7ee0a2cdba-kube-api-access-llrd6\") pod \"horizon-5fcdf587dd-wvthh\" (UID: \"8abc2e1e-de94-4880-8a75-0c7ee0a2cdba\") " pod="openstack/horizon-5fcdf587dd-wvthh" Oct 02 11:14:12 crc kubenswrapper[4783]: I1002 11:14:12.813282 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/8abc2e1e-de94-4880-8a75-0c7ee0a2cdba-horizon-tls-certs\") pod \"horizon-5fcdf587dd-wvthh\" (UID: \"8abc2e1e-de94-4880-8a75-0c7ee0a2cdba\") " pod="openstack/horizon-5fcdf587dd-wvthh" Oct 02 11:14:12 crc kubenswrapper[4783]: I1002 11:14:12.813336 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/8abc2e1e-de94-4880-8a75-0c7ee0a2cdba-horizon-secret-key\") pod \"horizon-5fcdf587dd-wvthh\" (UID: \"8abc2e1e-de94-4880-8a75-0c7ee0a2cdba\") " pod="openstack/horizon-5fcdf587dd-wvthh" Oct 02 11:14:12 crc kubenswrapper[4783]: I1002 11:14:12.813358 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8abc2e1e-de94-4880-8a75-0c7ee0a2cdba-logs\") pod \"horizon-5fcdf587dd-wvthh\" (UID: \"8abc2e1e-de94-4880-8a75-0c7ee0a2cdba\") " pod="openstack/horizon-5fcdf587dd-wvthh" Oct 02 11:14:12 crc kubenswrapper[4783]: I1002 11:14:12.828977 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8abc2e1e-de94-4880-8a75-0c7ee0a2cdba-scripts\") pod \"horizon-5fcdf587dd-wvthh\" (UID: \"8abc2e1e-de94-4880-8a75-0c7ee0a2cdba\") " pod="openstack/horizon-5fcdf587dd-wvthh" Oct 02 11:14:12 crc kubenswrapper[4783]: I1002 11:14:12.829687 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/8abc2e1e-de94-4880-8a75-0c7ee0a2cdba-horizon-tls-certs\") pod \"horizon-5fcdf587dd-wvthh\" (UID: \"8abc2e1e-de94-4880-8a75-0c7ee0a2cdba\") " pod="openstack/horizon-5fcdf587dd-wvthh" Oct 02 11:14:12 crc kubenswrapper[4783]: I1002 
11:14:12.830691 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8abc2e1e-de94-4880-8a75-0c7ee0a2cdba-logs\") pod \"horizon-5fcdf587dd-wvthh\" (UID: \"8abc2e1e-de94-4880-8a75-0c7ee0a2cdba\") " pod="openstack/horizon-5fcdf587dd-wvthh" Oct 02 11:14:12 crc kubenswrapper[4783]: I1002 11:14:12.831150 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/8abc2e1e-de94-4880-8a75-0c7ee0a2cdba-horizon-secret-key\") pod \"horizon-5fcdf587dd-wvthh\" (UID: \"8abc2e1e-de94-4880-8a75-0c7ee0a2cdba\") " pod="openstack/horizon-5fcdf587dd-wvthh" Oct 02 11:14:12 crc kubenswrapper[4783]: I1002 11:14:12.832223 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8abc2e1e-de94-4880-8a75-0c7ee0a2cdba-config-data\") pod \"horizon-5fcdf587dd-wvthh\" (UID: \"8abc2e1e-de94-4880-8a75-0c7ee0a2cdba\") " pod="openstack/horizon-5fcdf587dd-wvthh" Oct 02 11:14:12 crc kubenswrapper[4783]: I1002 11:14:12.839184 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8abc2e1e-de94-4880-8a75-0c7ee0a2cdba-combined-ca-bundle\") pod \"horizon-5fcdf587dd-wvthh\" (UID: \"8abc2e1e-de94-4880-8a75-0c7ee0a2cdba\") " pod="openstack/horizon-5fcdf587dd-wvthh" Oct 02 11:14:12 crc kubenswrapper[4783]: I1002 11:14:12.853583 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-567b57d86d-gv6fq"] Oct 02 11:14:12 crc kubenswrapper[4783]: I1002 11:14:12.872604 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-llrd6\" (UniqueName: \"kubernetes.io/projected/8abc2e1e-de94-4880-8a75-0c7ee0a2cdba-kube-api-access-llrd6\") pod \"horizon-5fcdf587dd-wvthh\" (UID: \"8abc2e1e-de94-4880-8a75-0c7ee0a2cdba\") " pod="openstack/horizon-5fcdf587dd-wvthh" Oct 02 11:14:12 crc kubenswrapper[4783]: I1002 11:14:12.915962 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/48c11fb6-76f0-4028-a76f-6f67904bf3aa-scripts\") pod \"horizon-567b57d86d-gv6fq\" (UID: \"48c11fb6-76f0-4028-a76f-6f67904bf3aa\") " pod="openstack/horizon-567b57d86d-gv6fq" Oct 02 11:14:12 crc kubenswrapper[4783]: I1002 11:14:12.916008 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/48c11fb6-76f0-4028-a76f-6f67904bf3aa-logs\") pod \"horizon-567b57d86d-gv6fq\" (UID: \"48c11fb6-76f0-4028-a76f-6f67904bf3aa\") " pod="openstack/horizon-567b57d86d-gv6fq" Oct 02 11:14:12 crc kubenswrapper[4783]: I1002 11:14:12.916060 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/48c11fb6-76f0-4028-a76f-6f67904bf3aa-horizon-secret-key\") pod \"horizon-567b57d86d-gv6fq\" (UID: \"48c11fb6-76f0-4028-a76f-6f67904bf3aa\") " pod="openstack/horizon-567b57d86d-gv6fq" Oct 02 11:14:12 crc kubenswrapper[4783]: I1002 11:14:12.916114 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/48c11fb6-76f0-4028-a76f-6f67904bf3aa-horizon-tls-certs\") pod \"horizon-567b57d86d-gv6fq\" (UID: \"48c11fb6-76f0-4028-a76f-6f67904bf3aa\") " pod="openstack/horizon-567b57d86d-gv6fq" Oct 
02 11:14:12 crc kubenswrapper[4783]: I1002 11:14:12.916149 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48c11fb6-76f0-4028-a76f-6f67904bf3aa-combined-ca-bundle\") pod \"horizon-567b57d86d-gv6fq\" (UID: \"48c11fb6-76f0-4028-a76f-6f67904bf3aa\") " pod="openstack/horizon-567b57d86d-gv6fq" Oct 02 11:14:12 crc kubenswrapper[4783]: I1002 11:14:12.916164 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2h2dv\" (UniqueName: \"kubernetes.io/projected/48c11fb6-76f0-4028-a76f-6f67904bf3aa-kube-api-access-2h2dv\") pod \"horizon-567b57d86d-gv6fq\" (UID: \"48c11fb6-76f0-4028-a76f-6f67904bf3aa\") " pod="openstack/horizon-567b57d86d-gv6fq" Oct 02 11:14:12 crc kubenswrapper[4783]: I1002 11:14:12.916181 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/48c11fb6-76f0-4028-a76f-6f67904bf3aa-config-data\") pod \"horizon-567b57d86d-gv6fq\" (UID: \"48c11fb6-76f0-4028-a76f-6f67904bf3aa\") " pod="openstack/horizon-567b57d86d-gv6fq" Oct 02 11:14:12 crc kubenswrapper[4783]: I1002 11:14:12.979897 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5fcdf587dd-wvthh" Oct 02 11:14:13 crc kubenswrapper[4783]: I1002 11:14:13.017938 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/48c11fb6-76f0-4028-a76f-6f67904bf3aa-scripts\") pod \"horizon-567b57d86d-gv6fq\" (UID: \"48c11fb6-76f0-4028-a76f-6f67904bf3aa\") " pod="openstack/horizon-567b57d86d-gv6fq" Oct 02 11:14:13 crc kubenswrapper[4783]: I1002 11:14:13.017981 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/48c11fb6-76f0-4028-a76f-6f67904bf3aa-logs\") pod \"horizon-567b57d86d-gv6fq\" (UID: \"48c11fb6-76f0-4028-a76f-6f67904bf3aa\") " pod="openstack/horizon-567b57d86d-gv6fq" Oct 02 11:14:13 crc kubenswrapper[4783]: I1002 11:14:13.018032 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/48c11fb6-76f0-4028-a76f-6f67904bf3aa-horizon-secret-key\") pod \"horizon-567b57d86d-gv6fq\" (UID: \"48c11fb6-76f0-4028-a76f-6f67904bf3aa\") " pod="openstack/horizon-567b57d86d-gv6fq" Oct 02 11:14:13 crc kubenswrapper[4783]: I1002 11:14:13.018092 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/48c11fb6-76f0-4028-a76f-6f67904bf3aa-horizon-tls-certs\") pod \"horizon-567b57d86d-gv6fq\" (UID: \"48c11fb6-76f0-4028-a76f-6f67904bf3aa\") " pod="openstack/horizon-567b57d86d-gv6fq" Oct 02 11:14:13 crc kubenswrapper[4783]: I1002 11:14:13.018127 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48c11fb6-76f0-4028-a76f-6f67904bf3aa-combined-ca-bundle\") pod \"horizon-567b57d86d-gv6fq\" (UID: \"48c11fb6-76f0-4028-a76f-6f67904bf3aa\") " pod="openstack/horizon-567b57d86d-gv6fq" Oct 02 11:14:13 crc kubenswrapper[4783]: I1002 11:14:13.018145 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2h2dv\" (UniqueName: \"kubernetes.io/projected/48c11fb6-76f0-4028-a76f-6f67904bf3aa-kube-api-access-2h2dv\") pod 
\"horizon-567b57d86d-gv6fq\" (UID: \"48c11fb6-76f0-4028-a76f-6f67904bf3aa\") " pod="openstack/horizon-567b57d86d-gv6fq" Oct 02 11:14:13 crc kubenswrapper[4783]: I1002 11:14:13.018161 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/48c11fb6-76f0-4028-a76f-6f67904bf3aa-config-data\") pod \"horizon-567b57d86d-gv6fq\" (UID: \"48c11fb6-76f0-4028-a76f-6f67904bf3aa\") " pod="openstack/horizon-567b57d86d-gv6fq" Oct 02 11:14:13 crc kubenswrapper[4783]: I1002 11:14:13.018645 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/48c11fb6-76f0-4028-a76f-6f67904bf3aa-logs\") pod \"horizon-567b57d86d-gv6fq\" (UID: \"48c11fb6-76f0-4028-a76f-6f67904bf3aa\") " pod="openstack/horizon-567b57d86d-gv6fq" Oct 02 11:14:13 crc kubenswrapper[4783]: I1002 11:14:13.018755 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/48c11fb6-76f0-4028-a76f-6f67904bf3aa-scripts\") pod \"horizon-567b57d86d-gv6fq\" (UID: \"48c11fb6-76f0-4028-a76f-6f67904bf3aa\") " pod="openstack/horizon-567b57d86d-gv6fq" Oct 02 11:14:13 crc kubenswrapper[4783]: I1002 11:14:13.019480 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/48c11fb6-76f0-4028-a76f-6f67904bf3aa-config-data\") pod \"horizon-567b57d86d-gv6fq\" (UID: \"48c11fb6-76f0-4028-a76f-6f67904bf3aa\") " pod="openstack/horizon-567b57d86d-gv6fq" Oct 02 11:14:13 crc kubenswrapper[4783]: I1002 11:14:13.022723 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/48c11fb6-76f0-4028-a76f-6f67904bf3aa-combined-ca-bundle\") pod \"horizon-567b57d86d-gv6fq\" (UID: \"48c11fb6-76f0-4028-a76f-6f67904bf3aa\") " pod="openstack/horizon-567b57d86d-gv6fq" Oct 02 11:14:13 crc kubenswrapper[4783]: I1002 11:14:13.026133 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/48c11fb6-76f0-4028-a76f-6f67904bf3aa-horizon-tls-certs\") pod \"horizon-567b57d86d-gv6fq\" (UID: \"48c11fb6-76f0-4028-a76f-6f67904bf3aa\") " pod="openstack/horizon-567b57d86d-gv6fq" Oct 02 11:14:13 crc kubenswrapper[4783]: I1002 11:14:13.037603 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2h2dv\" (UniqueName: \"kubernetes.io/projected/48c11fb6-76f0-4028-a76f-6f67904bf3aa-kube-api-access-2h2dv\") pod \"horizon-567b57d86d-gv6fq\" (UID: \"48c11fb6-76f0-4028-a76f-6f67904bf3aa\") " pod="openstack/horizon-567b57d86d-gv6fq" Oct 02 11:14:13 crc kubenswrapper[4783]: I1002 11:14:13.038330 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/48c11fb6-76f0-4028-a76f-6f67904bf3aa-horizon-secret-key\") pod \"horizon-567b57d86d-gv6fq\" (UID: \"48c11fb6-76f0-4028-a76f-6f67904bf3aa\") " pod="openstack/horizon-567b57d86d-gv6fq" Oct 02 11:14:13 crc kubenswrapper[4783]: I1002 11:14:13.112897 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-567b57d86d-gv6fq" Oct 02 11:14:14 crc kubenswrapper[4783]: I1002 11:14:14.212579 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-76fcf4b695-zd229" Oct 02 11:14:14 crc kubenswrapper[4783]: I1002 11:14:14.269890 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-77585f5f8c-n4xzc"] Oct 02 11:14:14 crc kubenswrapper[4783]: I1002 11:14:14.270209 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-77585f5f8c-n4xzc" podUID="9e9cc646-1006-4d71-8ac6-74eb08755970" containerName="dnsmasq-dns" containerID="cri-o://bdf71aa2917a5242d0327bfa7446400898fd0899cc9d3afdda31d9c052566ea7" gracePeriod=10 Oct 02 11:14:15 crc kubenswrapper[4783]: I1002 11:14:15.213825 4783 generic.go:334] "Generic (PLEG): container finished" podID="9e9cc646-1006-4d71-8ac6-74eb08755970" containerID="bdf71aa2917a5242d0327bfa7446400898fd0899cc9d3afdda31d9c052566ea7" exitCode=0 Oct 02 11:14:15 crc kubenswrapper[4783]: I1002 11:14:15.213897 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77585f5f8c-n4xzc" event={"ID":"9e9cc646-1006-4d71-8ac6-74eb08755970","Type":"ContainerDied","Data":"bdf71aa2917a5242d0327bfa7446400898fd0899cc9d3afdda31d9c052566ea7"} Oct 02 11:14:21 crc kubenswrapper[4783]: I1002 11:14:21.514027 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 02 11:14:21 crc kubenswrapper[4783]: I1002 11:14:21.515291 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 02 11:14:21 crc kubenswrapper[4783]: I1002 11:14:21.638218 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-77585f5f8c-n4xzc" podUID="9e9cc646-1006-4d71-8ac6-74eb08755970" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.123:5353: i/o timeout" Oct 02 11:14:26 crc kubenswrapper[4783]: I1002 11:14:26.614181 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-77585f5f8c-n4xzc" Oct 02 11:14:26 crc kubenswrapper[4783]: E1002 11:14:26.622399 4783 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-placement-api:current-podified" Oct 02 11:14:26 crc kubenswrapper[4783]: E1002 11:14:26.622580 4783 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:placement-db-sync,Image:quay.io/podified-antelope-centos9/openstack-placement-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/placement,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:false,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:placement-dbsync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-6fn5j,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42482,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod placement-db-sync-l9zch_openstack(6c42c3ba-c130-4b8d-940a-9aa134629554): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Oct 02 11:14:26 crc kubenswrapper[4783]: E1002 11:14:26.624550 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/placement-db-sync-l9zch" podUID="6c42c3ba-c130-4b8d-940a-9aa134629554" Oct 02 11:14:26 crc kubenswrapper[4783]: I1002 11:14:26.638985 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-77585f5f8c-n4xzc" podUID="9e9cc646-1006-4d71-8ac6-74eb08755970" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.123:5353: i/o timeout" Oct 02 11:14:26 crc kubenswrapper[4783]: I1002 11:14:26.766916 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9e9cc646-1006-4d71-8ac6-74eb08755970-ovsdbserver-sb\") pod \"9e9cc646-1006-4d71-8ac6-74eb08755970\" (UID: \"9e9cc646-1006-4d71-8ac6-74eb08755970\") " Oct 02 11:14:26 crc kubenswrapper[4783]: I1002 11:14:26.767096 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9e9cc646-1006-4d71-8ac6-74eb08755970-dns-svc\") pod \"9e9cc646-1006-4d71-8ac6-74eb08755970\" (UID: \"9e9cc646-1006-4d71-8ac6-74eb08755970\") " Oct 02 11:14:26 crc kubenswrapper[4783]: I1002 11:14:26.767122 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9e9cc646-1006-4d71-8ac6-74eb08755970-config\") pod \"9e9cc646-1006-4d71-8ac6-74eb08755970\" (UID: \"9e9cc646-1006-4d71-8ac6-74eb08755970\") " Oct 02 11:14:26 crc kubenswrapper[4783]: I1002 11:14:26.767192 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9e9cc646-1006-4d71-8ac6-74eb08755970-ovsdbserver-nb\") pod \"9e9cc646-1006-4d71-8ac6-74eb08755970\" (UID: \"9e9cc646-1006-4d71-8ac6-74eb08755970\") " Oct 02 11:14:26 crc kubenswrapper[4783]: I1002 11:14:26.767231 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w8tnf\" (UniqueName: \"kubernetes.io/projected/9e9cc646-1006-4d71-8ac6-74eb08755970-kube-api-access-w8tnf\") pod \"9e9cc646-1006-4d71-8ac6-74eb08755970\" (UID: \"9e9cc646-1006-4d71-8ac6-74eb08755970\") " Oct 02 11:14:26 crc kubenswrapper[4783]: I1002 11:14:26.767249 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9e9cc646-1006-4d71-8ac6-74eb08755970-dns-swift-storage-0\") pod \"9e9cc646-1006-4d71-8ac6-74eb08755970\" (UID: \"9e9cc646-1006-4d71-8ac6-74eb08755970\") " Oct 02 11:14:26 crc kubenswrapper[4783]: I1002 11:14:26.778666 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9e9cc646-1006-4d71-8ac6-74eb08755970-kube-api-access-w8tnf" (OuterVolumeSpecName: "kube-api-access-w8tnf") pod "9e9cc646-1006-4d71-8ac6-74eb08755970" (UID: "9e9cc646-1006-4d71-8ac6-74eb08755970"). InnerVolumeSpecName "kube-api-access-w8tnf". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:14:26 crc kubenswrapper[4783]: I1002 11:14:26.822364 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9e9cc646-1006-4d71-8ac6-74eb08755970-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "9e9cc646-1006-4d71-8ac6-74eb08755970" (UID: "9e9cc646-1006-4d71-8ac6-74eb08755970"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:14:26 crc kubenswrapper[4783]: I1002 11:14:26.827080 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9e9cc646-1006-4d71-8ac6-74eb08755970-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "9e9cc646-1006-4d71-8ac6-74eb08755970" (UID: "9e9cc646-1006-4d71-8ac6-74eb08755970"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:14:26 crc kubenswrapper[4783]: I1002 11:14:26.845725 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9e9cc646-1006-4d71-8ac6-74eb08755970-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "9e9cc646-1006-4d71-8ac6-74eb08755970" (UID: "9e9cc646-1006-4d71-8ac6-74eb08755970"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:14:26 crc kubenswrapper[4783]: I1002 11:14:26.852654 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9e9cc646-1006-4d71-8ac6-74eb08755970-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "9e9cc646-1006-4d71-8ac6-74eb08755970" (UID: "9e9cc646-1006-4d71-8ac6-74eb08755970"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:14:26 crc kubenswrapper[4783]: I1002 11:14:26.870148 4783 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9e9cc646-1006-4d71-8ac6-74eb08755970-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 02 11:14:26 crc kubenswrapper[4783]: I1002 11:14:26.870206 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w8tnf\" (UniqueName: \"kubernetes.io/projected/9e9cc646-1006-4d71-8ac6-74eb08755970-kube-api-access-w8tnf\") on node \"crc\" DevicePath \"\"" Oct 02 11:14:26 crc kubenswrapper[4783]: I1002 11:14:26.870216 4783 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9e9cc646-1006-4d71-8ac6-74eb08755970-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Oct 02 11:14:26 crc kubenswrapper[4783]: I1002 11:14:26.870224 4783 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9e9cc646-1006-4d71-8ac6-74eb08755970-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 02 11:14:26 crc kubenswrapper[4783]: I1002 11:14:26.870236 4783 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9e9cc646-1006-4d71-8ac6-74eb08755970-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 02 11:14:26 crc kubenswrapper[4783]: I1002 11:14:26.872503 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9e9cc646-1006-4d71-8ac6-74eb08755970-config" (OuterVolumeSpecName: "config") pod "9e9cc646-1006-4d71-8ac6-74eb08755970" (UID: "9e9cc646-1006-4d71-8ac6-74eb08755970"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:14:26 crc kubenswrapper[4783]: I1002 11:14:26.972568 4783 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9e9cc646-1006-4d71-8ac6-74eb08755970-config\") on node \"crc\" DevicePath \"\"" Oct 02 11:14:27 crc kubenswrapper[4783]: I1002 11:14:27.314830 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-77585f5f8c-n4xzc" event={"ID":"9e9cc646-1006-4d71-8ac6-74eb08755970","Type":"ContainerDied","Data":"d7ec9950130f46914c33f1b26b58bf1822b545e92b7fd1862da7e9cfc42f0c5d"} Oct 02 11:14:27 crc kubenswrapper[4783]: I1002 11:14:27.314967 4783 scope.go:117] "RemoveContainer" containerID="bdf71aa2917a5242d0327bfa7446400898fd0899cc9d3afdda31d9c052566ea7" Oct 02 11:14:27 crc kubenswrapper[4783]: I1002 11:14:27.316074 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-77585f5f8c-n4xzc" Oct 02 11:14:27 crc kubenswrapper[4783]: E1002 11:14:27.322429 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"placement-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-placement-api:current-podified\\\"\"" pod="openstack/placement-db-sync-l9zch" podUID="6c42c3ba-c130-4b8d-940a-9aa134629554" Oct 02 11:14:27 crc kubenswrapper[4783]: I1002 11:14:27.371539 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-77585f5f8c-n4xzc"] Oct 02 11:14:27 crc kubenswrapper[4783]: I1002 11:14:27.380719 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-77585f5f8c-n4xzc"] Oct 02 11:14:27 crc kubenswrapper[4783]: I1002 11:14:27.557316 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9e9cc646-1006-4d71-8ac6-74eb08755970" path="/var/lib/kubelet/pods/9e9cc646-1006-4d71-8ac6-74eb08755970/volumes" Oct 02 11:14:28 crc kubenswrapper[4783]: I1002 11:14:28.323597 4783 generic.go:334] "Generic (PLEG): container finished" podID="c91175c5-8c5b-4e52-aeb1-6a6f181803a7" containerID="4266001ae8854435bf1bb79107b79c61b25ece67b50254a237dc0f2dba2d06bf" exitCode=0 Oct 02 11:14:28 crc kubenswrapper[4783]: I1002 11:14:28.323692 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-h682n" event={"ID":"c91175c5-8c5b-4e52-aeb1-6a6f181803a7","Type":"ContainerDied","Data":"4266001ae8854435bf1bb79107b79c61b25ece67b50254a237dc0f2dba2d06bf"} Oct 02 11:14:32 crc kubenswrapper[4783]: E1002 11:14:32.569811 4783 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified" Oct 02 11:14:32 crc kubenswrapper[4783]: E1002 11:14:32.570427 4783 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F 
/var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n57ch655h87h87h67dhch654h647hb7h98h685h69h587h54ch598h55h646h5c6h549h646h576h687hd9hdbh5f7h5cbh699h8dhc8h5d8h8bh6bq,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-d65jt,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-84f9c985fc-t6l7c_openstack(61fe47fc-c4a7-4f02-8629-04abe2e0eb47): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Oct 02 11:14:32 crc kubenswrapper[4783]: E1002 11:14:32.577575 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-84f9c985fc-t6l7c" podUID="61fe47fc-c4a7-4f02-8629-04abe2e0eb47" Oct 02 11:14:32 crc kubenswrapper[4783]: E1002 11:14:32.581737 4783 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified" Oct 02 11:14:32 crc kubenswrapper[4783]: E1002 11:14:32.581896 4783 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F 
/var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n59bh646h79h57ch5bfhbch94hb6h687h688h66hdfh679h64ch54bh57ch5c7hdch578h677h65fh547h64fh56bh65ch5dbh5dfh7fh666h5dh55h5cq,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-x5r6w,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-5fcdf54855-qfzfs_openstack(aa156353-fc3a-4a52-b6cd-8b4a4ef1e898): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Oct 02 11:14:32 crc kubenswrapper[4783]: E1002 11:14:32.588122 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-5fcdf54855-qfzfs" podUID="aa156353-fc3a-4a52-b6cd-8b4a4ef1e898" Oct 02 11:14:35 crc kubenswrapper[4783]: E1002 11:14:35.105355 4783 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified" Oct 02 11:14:35 crc kubenswrapper[4783]: E1002 11:14:35.106055 4783 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:barbican-db-sync,Image:quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified,Command:[/bin/bash],Args:[-c barbican-manage db 
upgrade],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/barbican/barbican.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qz4gg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42403,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42403,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod barbican-db-sync-g9szh_openstack(68c31bf9-b59a-43ed-bb74-9e6cc0bce703): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Oct 02 11:14:35 crc kubenswrapper[4783]: E1002 11:14:35.107243 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/barbican-db-sync-g9szh" podUID="68c31bf9-b59a-43ed-bb74-9e6cc0bce703" Oct 02 11:14:35 crc kubenswrapper[4783]: E1002 11:14:35.129897 4783 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-horizon:current-podified" Oct 02 11:14:35 crc kubenswrapper[4783]: E1002 11:14:35.130039 4783 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:horizon-log,Image:quay.io/podified-antelope-centos9/openstack-horizon:current-podified,Command:[/bin/bash],Args:[-c tail -n+1 -F 
/var/log/horizon/horizon.log],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n54h667hd4h67fh7ch569h5c7h8fh5fch56fh65fh4h556hb4h579h658h56h575h685hbh75h686h5f4h59bh69h66bhbdh8fh57dh668h56h7fq,ValueFrom:nil,},EnvVar{Name:ENABLE_DESIGNATE,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_HEAT,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_IRONIC,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_MANILA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_OCTAVIA,Value:yes,ValueFrom:nil,},EnvVar{Name:ENABLE_WATCHER,Value:no,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:UNPACK_THEME,Value:true,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:logs,ReadOnly:false,MountPath:/var/log/horizon,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8q588,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*48,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*true,RunAsGroup:*42400,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod horizon-547b75bc85-glzvn_openstack(93f15260-265f-4b6f-96c9-f1beb4556c59): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Oct 02 11:14:35 crc kubenswrapper[4783]: E1002 11:14:35.136048 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="[failed to \"StartContainer\" for \"horizon-log\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\", failed to \"StartContainer\" for \"horizon\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-horizon:current-podified\\\"\"]" pod="openstack/horizon-547b75bc85-glzvn" podUID="93f15260-265f-4b6f-96c9-f1beb4556c59" Oct 02 11:14:35 crc kubenswrapper[4783]: E1002 11:14:35.383962 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"barbican-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-barbican-api:current-podified\\\"\"" pod="openstack/barbican-db-sync-g9szh" podUID="68c31bf9-b59a-43ed-bb74-9e6cc0bce703" Oct 02 11:14:38 crc kubenswrapper[4783]: I1002 11:14:38.428869 4783 generic.go:334] "Generic (PLEG): container finished" podID="aea4b3d6-8814-424e-a0b0-2748b63f0bfd" containerID="2aaa87d75cd2f7f0c066b1784385c64cc0de6811f713c16422f6cb7a24d61add" exitCode=0 Oct 02 11:14:38 crc kubenswrapper[4783]: I1002 11:14:38.429372 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-jq2kf" event={"ID":"aea4b3d6-8814-424e-a0b0-2748b63f0bfd","Type":"ContainerDied","Data":"2aaa87d75cd2f7f0c066b1784385c64cc0de6811f713c16422f6cb7a24d61add"} Oct 02 11:14:45 crc kubenswrapper[4783]: I1002 11:14:45.855430 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-5fcdf54855-qfzfs" Oct 02 11:14:45 crc kubenswrapper[4783]: I1002 11:14:45.864373 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-h682n" Oct 02 11:14:45 crc kubenswrapper[4783]: I1002 11:14:45.870621 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-84f9c985fc-t6l7c" Oct 02 11:14:45 crc kubenswrapper[4783]: I1002 11:14:45.872097 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x5r6w\" (UniqueName: \"kubernetes.io/projected/aa156353-fc3a-4a52-b6cd-8b4a4ef1e898-kube-api-access-x5r6w\") pod \"aa156353-fc3a-4a52-b6cd-8b4a4ef1e898\" (UID: \"aa156353-fc3a-4a52-b6cd-8b4a4ef1e898\") " Oct 02 11:14:45 crc kubenswrapper[4783]: I1002 11:14:45.872230 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/c91175c5-8c5b-4e52-aeb1-6a6f181803a7-credential-keys\") pod \"c91175c5-8c5b-4e52-aeb1-6a6f181803a7\" (UID: \"c91175c5-8c5b-4e52-aeb1-6a6f181803a7\") " Oct 02 11:14:45 crc kubenswrapper[4783]: I1002 11:14:45.872273 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c91175c5-8c5b-4e52-aeb1-6a6f181803a7-fernet-keys\") pod \"c91175c5-8c5b-4e52-aeb1-6a6f181803a7\" (UID: \"c91175c5-8c5b-4e52-aeb1-6a6f181803a7\") " Oct 02 11:14:45 crc kubenswrapper[4783]: I1002 11:14:45.872331 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/aa156353-fc3a-4a52-b6cd-8b4a4ef1e898-scripts\") pod \"aa156353-fc3a-4a52-b6cd-8b4a4ef1e898\" (UID: \"aa156353-fc3a-4a52-b6cd-8b4a4ef1e898\") " Oct 02 11:14:45 crc kubenswrapper[4783]: I1002 11:14:45.872380 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c91175c5-8c5b-4e52-aeb1-6a6f181803a7-config-data\") pod \"c91175c5-8c5b-4e52-aeb1-6a6f181803a7\" (UID: \"c91175c5-8c5b-4e52-aeb1-6a6f181803a7\") " Oct 02 11:14:45 crc kubenswrapper[4783]: I1002 11:14:45.872451 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c91175c5-8c5b-4e52-aeb1-6a6f181803a7-scripts\") pod \"c91175c5-8c5b-4e52-aeb1-6a6f181803a7\" (UID: \"c91175c5-8c5b-4e52-aeb1-6a6f181803a7\") " Oct 02 11:14:45 crc kubenswrapper[4783]: I1002 11:14:45.872502 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c91175c5-8c5b-4e52-aeb1-6a6f181803a7-combined-ca-bundle\") pod \"c91175c5-8c5b-4e52-aeb1-6a6f181803a7\" (UID: \"c91175c5-8c5b-4e52-aeb1-6a6f181803a7\") " Oct 02 11:14:45 crc kubenswrapper[4783]: I1002 11:14:45.872559 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/aa156353-fc3a-4a52-b6cd-8b4a4ef1e898-horizon-secret-key\") pod \"aa156353-fc3a-4a52-b6cd-8b4a4ef1e898\" (UID: \"aa156353-fc3a-4a52-b6cd-8b4a4ef1e898\") " Oct 02 11:14:45 crc kubenswrapper[4783]: I1002 11:14:45.872591 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/aa156353-fc3a-4a52-b6cd-8b4a4ef1e898-config-data\") pod \"aa156353-fc3a-4a52-b6cd-8b4a4ef1e898\" (UID: 
\"aa156353-fc3a-4a52-b6cd-8b4a4ef1e898\") " Oct 02 11:14:45 crc kubenswrapper[4783]: I1002 11:14:45.872657 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rjjzn\" (UniqueName: \"kubernetes.io/projected/c91175c5-8c5b-4e52-aeb1-6a6f181803a7-kube-api-access-rjjzn\") pod \"c91175c5-8c5b-4e52-aeb1-6a6f181803a7\" (UID: \"c91175c5-8c5b-4e52-aeb1-6a6f181803a7\") " Oct 02 11:14:45 crc kubenswrapper[4783]: I1002 11:14:45.872694 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/aa156353-fc3a-4a52-b6cd-8b4a4ef1e898-logs\") pod \"aa156353-fc3a-4a52-b6cd-8b4a4ef1e898\" (UID: \"aa156353-fc3a-4a52-b6cd-8b4a4ef1e898\") " Oct 02 11:14:45 crc kubenswrapper[4783]: I1002 11:14:45.873326 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aa156353-fc3a-4a52-b6cd-8b4a4ef1e898-logs" (OuterVolumeSpecName: "logs") pod "aa156353-fc3a-4a52-b6cd-8b4a4ef1e898" (UID: "aa156353-fc3a-4a52-b6cd-8b4a4ef1e898"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:14:45 crc kubenswrapper[4783]: I1002 11:14:45.874337 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa156353-fc3a-4a52-b6cd-8b4a4ef1e898-config-data" (OuterVolumeSpecName: "config-data") pod "aa156353-fc3a-4a52-b6cd-8b4a4ef1e898" (UID: "aa156353-fc3a-4a52-b6cd-8b4a4ef1e898"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:14:45 crc kubenswrapper[4783]: I1002 11:14:45.879267 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/aa156353-fc3a-4a52-b6cd-8b4a4ef1e898-scripts" (OuterVolumeSpecName: "scripts") pod "aa156353-fc3a-4a52-b6cd-8b4a4ef1e898" (UID: "aa156353-fc3a-4a52-b6cd-8b4a4ef1e898"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:14:45 crc kubenswrapper[4783]: I1002 11:14:45.882522 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c91175c5-8c5b-4e52-aeb1-6a6f181803a7-kube-api-access-rjjzn" (OuterVolumeSpecName: "kube-api-access-rjjzn") pod "c91175c5-8c5b-4e52-aeb1-6a6f181803a7" (UID: "c91175c5-8c5b-4e52-aeb1-6a6f181803a7"). InnerVolumeSpecName "kube-api-access-rjjzn". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:14:45 crc kubenswrapper[4783]: I1002 11:14:45.888056 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aa156353-fc3a-4a52-b6cd-8b4a4ef1e898-kube-api-access-x5r6w" (OuterVolumeSpecName: "kube-api-access-x5r6w") pod "aa156353-fc3a-4a52-b6cd-8b4a4ef1e898" (UID: "aa156353-fc3a-4a52-b6cd-8b4a4ef1e898"). InnerVolumeSpecName "kube-api-access-x5r6w". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:14:45 crc kubenswrapper[4783]: I1002 11:14:45.894871 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aa156353-fc3a-4a52-b6cd-8b4a4ef1e898-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "aa156353-fc3a-4a52-b6cd-8b4a4ef1e898" (UID: "aa156353-fc3a-4a52-b6cd-8b4a4ef1e898"). InnerVolumeSpecName "horizon-secret-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:14:45 crc kubenswrapper[4783]: I1002 11:14:45.894884 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c91175c5-8c5b-4e52-aeb1-6a6f181803a7-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "c91175c5-8c5b-4e52-aeb1-6a6f181803a7" (UID: "c91175c5-8c5b-4e52-aeb1-6a6f181803a7"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:14:45 crc kubenswrapper[4783]: I1002 11:14:45.894895 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c91175c5-8c5b-4e52-aeb1-6a6f181803a7-scripts" (OuterVolumeSpecName: "scripts") pod "c91175c5-8c5b-4e52-aeb1-6a6f181803a7" (UID: "c91175c5-8c5b-4e52-aeb1-6a6f181803a7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:14:45 crc kubenswrapper[4783]: I1002 11:14:45.916762 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c91175c5-8c5b-4e52-aeb1-6a6f181803a7-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "c91175c5-8c5b-4e52-aeb1-6a6f181803a7" (UID: "c91175c5-8c5b-4e52-aeb1-6a6f181803a7"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:14:45 crc kubenswrapper[4783]: I1002 11:14:45.926243 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c91175c5-8c5b-4e52-aeb1-6a6f181803a7-config-data" (OuterVolumeSpecName: "config-data") pod "c91175c5-8c5b-4e52-aeb1-6a6f181803a7" (UID: "c91175c5-8c5b-4e52-aeb1-6a6f181803a7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:14:45 crc kubenswrapper[4783]: I1002 11:14:45.926392 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c91175c5-8c5b-4e52-aeb1-6a6f181803a7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c91175c5-8c5b-4e52-aeb1-6a6f181803a7" (UID: "c91175c5-8c5b-4e52-aeb1-6a6f181803a7"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:14:45 crc kubenswrapper[4783]: I1002 11:14:45.974183 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/61fe47fc-c4a7-4f02-8629-04abe2e0eb47-logs\") pod \"61fe47fc-c4a7-4f02-8629-04abe2e0eb47\" (UID: \"61fe47fc-c4a7-4f02-8629-04abe2e0eb47\") " Oct 02 11:14:45 crc kubenswrapper[4783]: I1002 11:14:45.974309 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/61fe47fc-c4a7-4f02-8629-04abe2e0eb47-scripts\") pod \"61fe47fc-c4a7-4f02-8629-04abe2e0eb47\" (UID: \"61fe47fc-c4a7-4f02-8629-04abe2e0eb47\") " Oct 02 11:14:45 crc kubenswrapper[4783]: I1002 11:14:45.974344 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/61fe47fc-c4a7-4f02-8629-04abe2e0eb47-config-data\") pod \"61fe47fc-c4a7-4f02-8629-04abe2e0eb47\" (UID: \"61fe47fc-c4a7-4f02-8629-04abe2e0eb47\") " Oct 02 11:14:45 crc kubenswrapper[4783]: I1002 11:14:45.974441 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/61fe47fc-c4a7-4f02-8629-04abe2e0eb47-horizon-secret-key\") pod \"61fe47fc-c4a7-4f02-8629-04abe2e0eb47\" (UID: \"61fe47fc-c4a7-4f02-8629-04abe2e0eb47\") " Oct 02 11:14:45 crc kubenswrapper[4783]: I1002 11:14:45.974498 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/61fe47fc-c4a7-4f02-8629-04abe2e0eb47-logs" (OuterVolumeSpecName: "logs") pod "61fe47fc-c4a7-4f02-8629-04abe2e0eb47" (UID: "61fe47fc-c4a7-4f02-8629-04abe2e0eb47"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:14:45 crc kubenswrapper[4783]: I1002 11:14:45.974548 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d65jt\" (UniqueName: \"kubernetes.io/projected/61fe47fc-c4a7-4f02-8629-04abe2e0eb47-kube-api-access-d65jt\") pod \"61fe47fc-c4a7-4f02-8629-04abe2e0eb47\" (UID: \"61fe47fc-c4a7-4f02-8629-04abe2e0eb47\") " Oct 02 11:14:45 crc kubenswrapper[4783]: I1002 11:14:45.974829 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/61fe47fc-c4a7-4f02-8629-04abe2e0eb47-scripts" (OuterVolumeSpecName: "scripts") pod "61fe47fc-c4a7-4f02-8629-04abe2e0eb47" (UID: "61fe47fc-c4a7-4f02-8629-04abe2e0eb47"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:14:45 crc kubenswrapper[4783]: I1002 11:14:45.975004 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x5r6w\" (UniqueName: \"kubernetes.io/projected/aa156353-fc3a-4a52-b6cd-8b4a4ef1e898-kube-api-access-x5r6w\") on node \"crc\" DevicePath \"\"" Oct 02 11:14:45 crc kubenswrapper[4783]: I1002 11:14:45.975027 4783 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/61fe47fc-c4a7-4f02-8629-04abe2e0eb47-scripts\") on node \"crc\" DevicePath \"\"" Oct 02 11:14:45 crc kubenswrapper[4783]: I1002 11:14:45.975041 4783 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/c91175c5-8c5b-4e52-aeb1-6a6f181803a7-credential-keys\") on node \"crc\" DevicePath \"\"" Oct 02 11:14:45 crc kubenswrapper[4783]: I1002 11:14:45.975052 4783 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/c91175c5-8c5b-4e52-aeb1-6a6f181803a7-fernet-keys\") on node \"crc\" DevicePath \"\"" Oct 02 11:14:45 crc kubenswrapper[4783]: I1002 11:14:45.975063 4783 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/aa156353-fc3a-4a52-b6cd-8b4a4ef1e898-scripts\") on node \"crc\" DevicePath \"\"" Oct 02 11:14:45 crc kubenswrapper[4783]: I1002 11:14:45.975073 4783 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c91175c5-8c5b-4e52-aeb1-6a6f181803a7-config-data\") on node \"crc\" DevicePath \"\"" Oct 02 11:14:45 crc kubenswrapper[4783]: I1002 11:14:45.975085 4783 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c91175c5-8c5b-4e52-aeb1-6a6f181803a7-scripts\") on node \"crc\" DevicePath \"\"" Oct 02 11:14:45 crc kubenswrapper[4783]: I1002 11:14:45.975098 4783 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c91175c5-8c5b-4e52-aeb1-6a6f181803a7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 11:14:45 crc kubenswrapper[4783]: I1002 11:14:45.975111 4783 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/aa156353-fc3a-4a52-b6cd-8b4a4ef1e898-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Oct 02 11:14:45 crc kubenswrapper[4783]: I1002 11:14:45.975121 4783 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/aa156353-fc3a-4a52-b6cd-8b4a4ef1e898-config-data\") on node \"crc\" DevicePath \"\"" Oct 02 11:14:45 crc kubenswrapper[4783]: I1002 11:14:45.975132 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rjjzn\" (UniqueName: \"kubernetes.io/projected/c91175c5-8c5b-4e52-aeb1-6a6f181803a7-kube-api-access-rjjzn\") on node \"crc\" DevicePath \"\"" Oct 02 11:14:45 crc kubenswrapper[4783]: I1002 11:14:45.975143 4783 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/aa156353-fc3a-4a52-b6cd-8b4a4ef1e898-logs\") on node \"crc\" DevicePath \"\"" Oct 02 11:14:45 crc kubenswrapper[4783]: I1002 11:14:45.975153 4783 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/61fe47fc-c4a7-4f02-8629-04abe2e0eb47-logs\") on node \"crc\" DevicePath \"\"" Oct 02 11:14:45 crc kubenswrapper[4783]: I1002 11:14:45.975813 4783 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/61fe47fc-c4a7-4f02-8629-04abe2e0eb47-config-data" (OuterVolumeSpecName: "config-data") pod "61fe47fc-c4a7-4f02-8629-04abe2e0eb47" (UID: "61fe47fc-c4a7-4f02-8629-04abe2e0eb47"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:14:45 crc kubenswrapper[4783]: I1002 11:14:45.978618 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/61fe47fc-c4a7-4f02-8629-04abe2e0eb47-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "61fe47fc-c4a7-4f02-8629-04abe2e0eb47" (UID: "61fe47fc-c4a7-4f02-8629-04abe2e0eb47"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:14:45 crc kubenswrapper[4783]: I1002 11:14:45.978635 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/61fe47fc-c4a7-4f02-8629-04abe2e0eb47-kube-api-access-d65jt" (OuterVolumeSpecName: "kube-api-access-d65jt") pod "61fe47fc-c4a7-4f02-8629-04abe2e0eb47" (UID: "61fe47fc-c4a7-4f02-8629-04abe2e0eb47"). InnerVolumeSpecName "kube-api-access-d65jt". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:14:46 crc kubenswrapper[4783]: I1002 11:14:46.076866 4783 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/61fe47fc-c4a7-4f02-8629-04abe2e0eb47-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Oct 02 11:14:46 crc kubenswrapper[4783]: I1002 11:14:46.076909 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d65jt\" (UniqueName: \"kubernetes.io/projected/61fe47fc-c4a7-4f02-8629-04abe2e0eb47-kube-api-access-d65jt\") on node \"crc\" DevicePath \"\"" Oct 02 11:14:46 crc kubenswrapper[4783]: I1002 11:14:46.076920 4783 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/61fe47fc-c4a7-4f02-8629-04abe2e0eb47-config-data\") on node \"crc\" DevicePath \"\"" Oct 02 11:14:46 crc kubenswrapper[4783]: I1002 11:14:46.511294 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-5fcdf54855-qfzfs" Oct 02 11:14:46 crc kubenswrapper[4783]: I1002 11:14:46.511304 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5fcdf54855-qfzfs" event={"ID":"aa156353-fc3a-4a52-b6cd-8b4a4ef1e898","Type":"ContainerDied","Data":"90511279d6f6a8dea9511165cc6732b44daf32b2de9b9815f180f79c2e522579"} Oct 02 11:14:46 crc kubenswrapper[4783]: I1002 11:14:46.513152 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-84f9c985fc-t6l7c" event={"ID":"61fe47fc-c4a7-4f02-8629-04abe2e0eb47","Type":"ContainerDied","Data":"e6cd161be7d4147e4fbf4e0235ecae4290278648cf7477bf5961c3dc36154d70"} Oct 02 11:14:46 crc kubenswrapper[4783]: I1002 11:14:46.513258 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-84f9c985fc-t6l7c" Oct 02 11:14:46 crc kubenswrapper[4783]: I1002 11:14:46.517242 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-h682n" event={"ID":"c91175c5-8c5b-4e52-aeb1-6a6f181803a7","Type":"ContainerDied","Data":"4be1fdcd2bc43564343385e369d13cc05490fd2d6054e16357c6640af6ac45dd"} Oct 02 11:14:46 crc kubenswrapper[4783]: I1002 11:14:46.517318 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4be1fdcd2bc43564343385e369d13cc05490fd2d6054e16357c6640af6ac45dd" Oct 02 11:14:46 crc kubenswrapper[4783]: I1002 11:14:46.517269 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-h682n" Oct 02 11:14:46 crc kubenswrapper[4783]: I1002 11:14:46.585928 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5fcdf54855-qfzfs"] Oct 02 11:14:46 crc kubenswrapper[4783]: I1002 11:14:46.594186 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-5fcdf54855-qfzfs"] Oct 02 11:14:46 crc kubenswrapper[4783]: I1002 11:14:46.665811 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-84f9c985fc-t6l7c"] Oct 02 11:14:46 crc kubenswrapper[4783]: I1002 11:14:46.665868 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-84f9c985fc-t6l7c"] Oct 02 11:14:46 crc kubenswrapper[4783]: I1002 11:14:46.995191 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-h682n"] Oct 02 11:14:47 crc kubenswrapper[4783]: I1002 11:14:47.001350 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-h682n"] Oct 02 11:14:47 crc kubenswrapper[4783]: I1002 11:14:47.098190 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-bw8mq"] Oct 02 11:14:47 crc kubenswrapper[4783]: E1002 11:14:47.098730 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c91175c5-8c5b-4e52-aeb1-6a6f181803a7" containerName="keystone-bootstrap" Oct 02 11:14:47 crc kubenswrapper[4783]: I1002 11:14:47.098833 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="c91175c5-8c5b-4e52-aeb1-6a6f181803a7" containerName="keystone-bootstrap" Oct 02 11:14:47 crc kubenswrapper[4783]: E1002 11:14:47.098927 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e9cc646-1006-4d71-8ac6-74eb08755970" containerName="dnsmasq-dns" Oct 02 11:14:47 crc kubenswrapper[4783]: I1002 11:14:47.098977 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e9cc646-1006-4d71-8ac6-74eb08755970" containerName="dnsmasq-dns" Oct 02 11:14:47 crc kubenswrapper[4783]: E1002 11:14:47.099029 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e9cc646-1006-4d71-8ac6-74eb08755970" containerName="init" Oct 02 11:14:47 crc kubenswrapper[4783]: I1002 11:14:47.099078 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e9cc646-1006-4d71-8ac6-74eb08755970" containerName="init" Oct 02 11:14:47 crc kubenswrapper[4783]: I1002 11:14:47.099280 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="9e9cc646-1006-4d71-8ac6-74eb08755970" containerName="dnsmasq-dns" Oct 02 11:14:47 crc kubenswrapper[4783]: I1002 11:14:47.099343 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="c91175c5-8c5b-4e52-aeb1-6a6f181803a7" containerName="keystone-bootstrap" Oct 02 11:14:47 crc kubenswrapper[4783]: I1002 11:14:47.099912 4783 util.go:30] "No 
sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-bw8mq" Oct 02 11:14:47 crc kubenswrapper[4783]: I1002 11:14:47.103944 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Oct 02 11:14:47 crc kubenswrapper[4783]: I1002 11:14:47.104196 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-wgqxk" Oct 02 11:14:47 crc kubenswrapper[4783]: I1002 11:14:47.104360 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Oct 02 11:14:47 crc kubenswrapper[4783]: I1002 11:14:47.104504 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Oct 02 11:14:47 crc kubenswrapper[4783]: I1002 11:14:47.110920 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-bw8mq"] Oct 02 11:14:47 crc kubenswrapper[4783]: I1002 11:14:47.298381 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2f856246-d4db-48e2-81ec-b756ceba0667-scripts\") pod \"keystone-bootstrap-bw8mq\" (UID: \"2f856246-d4db-48e2-81ec-b756ceba0667\") " pod="openstack/keystone-bootstrap-bw8mq" Oct 02 11:14:47 crc kubenswrapper[4783]: I1002 11:14:47.298739 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/2f856246-d4db-48e2-81ec-b756ceba0667-credential-keys\") pod \"keystone-bootstrap-bw8mq\" (UID: \"2f856246-d4db-48e2-81ec-b756ceba0667\") " pod="openstack/keystone-bootstrap-bw8mq" Oct 02 11:14:47 crc kubenswrapper[4783]: I1002 11:14:47.298841 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/2f856246-d4db-48e2-81ec-b756ceba0667-fernet-keys\") pod \"keystone-bootstrap-bw8mq\" (UID: \"2f856246-d4db-48e2-81ec-b756ceba0667\") " pod="openstack/keystone-bootstrap-bw8mq" Oct 02 11:14:47 crc kubenswrapper[4783]: I1002 11:14:47.299100 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2f856246-d4db-48e2-81ec-b756ceba0667-config-data\") pod \"keystone-bootstrap-bw8mq\" (UID: \"2f856246-d4db-48e2-81ec-b756ceba0667\") " pod="openstack/keystone-bootstrap-bw8mq" Oct 02 11:14:47 crc kubenswrapper[4783]: I1002 11:14:47.299275 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2f856246-d4db-48e2-81ec-b756ceba0667-combined-ca-bundle\") pod \"keystone-bootstrap-bw8mq\" (UID: \"2f856246-d4db-48e2-81ec-b756ceba0667\") " pod="openstack/keystone-bootstrap-bw8mq" Oct 02 11:14:47 crc kubenswrapper[4783]: I1002 11:14:47.299370 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n5t9b\" (UniqueName: \"kubernetes.io/projected/2f856246-d4db-48e2-81ec-b756ceba0667-kube-api-access-n5t9b\") pod \"keystone-bootstrap-bw8mq\" (UID: \"2f856246-d4db-48e2-81ec-b756ceba0667\") " pod="openstack/keystone-bootstrap-bw8mq" Oct 02 11:14:47 crc kubenswrapper[4783]: I1002 11:14:47.400638 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2f856246-d4db-48e2-81ec-b756ceba0667-config-data\") pod 
\"keystone-bootstrap-bw8mq\" (UID: \"2f856246-d4db-48e2-81ec-b756ceba0667\") " pod="openstack/keystone-bootstrap-bw8mq" Oct 02 11:14:47 crc kubenswrapper[4783]: I1002 11:14:47.400764 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2f856246-d4db-48e2-81ec-b756ceba0667-combined-ca-bundle\") pod \"keystone-bootstrap-bw8mq\" (UID: \"2f856246-d4db-48e2-81ec-b756ceba0667\") " pod="openstack/keystone-bootstrap-bw8mq" Oct 02 11:14:47 crc kubenswrapper[4783]: I1002 11:14:47.400819 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n5t9b\" (UniqueName: \"kubernetes.io/projected/2f856246-d4db-48e2-81ec-b756ceba0667-kube-api-access-n5t9b\") pod \"keystone-bootstrap-bw8mq\" (UID: \"2f856246-d4db-48e2-81ec-b756ceba0667\") " pod="openstack/keystone-bootstrap-bw8mq" Oct 02 11:14:47 crc kubenswrapper[4783]: I1002 11:14:47.400908 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2f856246-d4db-48e2-81ec-b756ceba0667-scripts\") pod \"keystone-bootstrap-bw8mq\" (UID: \"2f856246-d4db-48e2-81ec-b756ceba0667\") " pod="openstack/keystone-bootstrap-bw8mq" Oct 02 11:14:47 crc kubenswrapper[4783]: I1002 11:14:47.401012 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/2f856246-d4db-48e2-81ec-b756ceba0667-credential-keys\") pod \"keystone-bootstrap-bw8mq\" (UID: \"2f856246-d4db-48e2-81ec-b756ceba0667\") " pod="openstack/keystone-bootstrap-bw8mq" Oct 02 11:14:47 crc kubenswrapper[4783]: I1002 11:14:47.401054 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/2f856246-d4db-48e2-81ec-b756ceba0667-fernet-keys\") pod \"keystone-bootstrap-bw8mq\" (UID: \"2f856246-d4db-48e2-81ec-b756ceba0667\") " pod="openstack/keystone-bootstrap-bw8mq" Oct 02 11:14:47 crc kubenswrapper[4783]: I1002 11:14:47.410195 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2f856246-d4db-48e2-81ec-b756ceba0667-combined-ca-bundle\") pod \"keystone-bootstrap-bw8mq\" (UID: \"2f856246-d4db-48e2-81ec-b756ceba0667\") " pod="openstack/keystone-bootstrap-bw8mq" Oct 02 11:14:47 crc kubenswrapper[4783]: I1002 11:14:47.410362 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2f856246-d4db-48e2-81ec-b756ceba0667-scripts\") pod \"keystone-bootstrap-bw8mq\" (UID: \"2f856246-d4db-48e2-81ec-b756ceba0667\") " pod="openstack/keystone-bootstrap-bw8mq" Oct 02 11:14:47 crc kubenswrapper[4783]: I1002 11:14:47.410922 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2f856246-d4db-48e2-81ec-b756ceba0667-config-data\") pod \"keystone-bootstrap-bw8mq\" (UID: \"2f856246-d4db-48e2-81ec-b756ceba0667\") " pod="openstack/keystone-bootstrap-bw8mq" Oct 02 11:14:47 crc kubenswrapper[4783]: I1002 11:14:47.411032 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/2f856246-d4db-48e2-81ec-b756ceba0667-credential-keys\") pod \"keystone-bootstrap-bw8mq\" (UID: \"2f856246-d4db-48e2-81ec-b756ceba0667\") " pod="openstack/keystone-bootstrap-bw8mq" Oct 02 11:14:47 crc kubenswrapper[4783]: I1002 11:14:47.415951 4783 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/2f856246-d4db-48e2-81ec-b756ceba0667-fernet-keys\") pod \"keystone-bootstrap-bw8mq\" (UID: \"2f856246-d4db-48e2-81ec-b756ceba0667\") " pod="openstack/keystone-bootstrap-bw8mq" Oct 02 11:14:47 crc kubenswrapper[4783]: I1002 11:14:47.424452 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n5t9b\" (UniqueName: \"kubernetes.io/projected/2f856246-d4db-48e2-81ec-b756ceba0667-kube-api-access-n5t9b\") pod \"keystone-bootstrap-bw8mq\" (UID: \"2f856246-d4db-48e2-81ec-b756ceba0667\") " pod="openstack/keystone-bootstrap-bw8mq" Oct 02 11:14:47 crc kubenswrapper[4783]: I1002 11:14:47.426537 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-bw8mq" Oct 02 11:14:47 crc kubenswrapper[4783]: I1002 11:14:47.560947 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="61fe47fc-c4a7-4f02-8629-04abe2e0eb47" path="/var/lib/kubelet/pods/61fe47fc-c4a7-4f02-8629-04abe2e0eb47/volumes" Oct 02 11:14:47 crc kubenswrapper[4783]: I1002 11:14:47.561865 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aa156353-fc3a-4a52-b6cd-8b4a4ef1e898" path="/var/lib/kubelet/pods/aa156353-fc3a-4a52-b6cd-8b4a4ef1e898/volumes" Oct 02 11:14:47 crc kubenswrapper[4783]: I1002 11:14:47.562285 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c91175c5-8c5b-4e52-aeb1-6a6f181803a7" path="/var/lib/kubelet/pods/c91175c5-8c5b-4e52-aeb1-6a6f181803a7/volumes" Oct 02 11:14:48 crc kubenswrapper[4783]: E1002 11:14:48.462708 4783 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified" Oct 02 11:14:48 crc kubenswrapper[4783]: E1002 11:14:48.462955 4783 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:ceilometer-central-agent,Image:quay.io/podified-antelope-centos9/openstack-ceilometer-central:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n67h6fhcdhc8h5b9h665hc6h67bh6dh9dhb6hcfh688h6dh7dh5b8hch567h59dh77h594h8bh5bdh657h68ch565h548h87h87h57bhd6hb8q,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:scripts,ReadOnly:true,MountPath:/var/lib/openstack/bin,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/openstack/config,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:ceilometer-central-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hnhr6,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:&ExecAction{Command:[/usr/bin/python3 /var/lib/openstack/bin/centralhealth.py],},HTTPGet:nil,TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:300,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ceilometer-0_openstack(193c55b5-888f-4738-b8f6-c075d2b396a5): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Oct 02 11:14:48 crc kubenswrapper[4783]: I1002 11:14:48.485778 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-547b75bc85-glzvn" Oct 02 11:14:48 crc kubenswrapper[4783]: I1002 11:14:48.496136 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-jq2kf" Oct 02 11:14:48 crc kubenswrapper[4783]: I1002 11:14:48.551707 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-db-sync-jq2kf" Oct 02 11:14:48 crc kubenswrapper[4783]: I1002 11:14:48.551707 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-jq2kf" event={"ID":"aea4b3d6-8814-424e-a0b0-2748b63f0bfd","Type":"ContainerDied","Data":"a6f8a68ae861f6d02b46847f1ec0ea627e72f3e0c879eeab3bda06aaf2dcc00c"} Oct 02 11:14:48 crc kubenswrapper[4783]: I1002 11:14:48.551829 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a6f8a68ae861f6d02b46847f1ec0ea627e72f3e0c879eeab3bda06aaf2dcc00c" Oct 02 11:14:48 crc kubenswrapper[4783]: I1002 11:14:48.552772 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-547b75bc85-glzvn" event={"ID":"93f15260-265f-4b6f-96c9-f1beb4556c59","Type":"ContainerDied","Data":"20eb04823c03896b1c62593a8b99afaf28f1cefd09c80d23d1303516308cbff6"} Oct 02 11:14:48 crc kubenswrapper[4783]: I1002 11:14:48.552835 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/horizon-547b75bc85-glzvn" Oct 02 11:14:48 crc kubenswrapper[4783]: I1002 11:14:48.621161 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qbhlz\" (UniqueName: \"kubernetes.io/projected/aea4b3d6-8814-424e-a0b0-2748b63f0bfd-kube-api-access-qbhlz\") pod \"aea4b3d6-8814-424e-a0b0-2748b63f0bfd\" (UID: \"aea4b3d6-8814-424e-a0b0-2748b63f0bfd\") " Oct 02 11:14:48 crc kubenswrapper[4783]: I1002 11:14:48.621244 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aea4b3d6-8814-424e-a0b0-2748b63f0bfd-config-data\") pod \"aea4b3d6-8814-424e-a0b0-2748b63f0bfd\" (UID: \"aea4b3d6-8814-424e-a0b0-2748b63f0bfd\") " Oct 02 11:14:48 crc kubenswrapper[4783]: I1002 11:14:48.621372 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/93f15260-265f-4b6f-96c9-f1beb4556c59-horizon-secret-key\") pod \"93f15260-265f-4b6f-96c9-f1beb4556c59\" (UID: \"93f15260-265f-4b6f-96c9-f1beb4556c59\") " Oct 02 11:14:48 crc kubenswrapper[4783]: I1002 11:14:48.621589 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/aea4b3d6-8814-424e-a0b0-2748b63f0bfd-db-sync-config-data\") pod \"aea4b3d6-8814-424e-a0b0-2748b63f0bfd\" (UID: \"aea4b3d6-8814-424e-a0b0-2748b63f0bfd\") " Oct 02 11:14:48 crc kubenswrapper[4783]: I1002 11:14:48.621659 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/93f15260-265f-4b6f-96c9-f1beb4556c59-config-data\") pod \"93f15260-265f-4b6f-96c9-f1beb4556c59\" (UID: \"93f15260-265f-4b6f-96c9-f1beb4556c59\") " Oct 02 11:14:48 crc kubenswrapper[4783]: I1002 11:14:48.621704 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aea4b3d6-8814-424e-a0b0-2748b63f0bfd-combined-ca-bundle\") pod \"aea4b3d6-8814-424e-a0b0-2748b63f0bfd\" (UID: \"aea4b3d6-8814-424e-a0b0-2748b63f0bfd\") " Oct 02 11:14:48 crc kubenswrapper[4783]: I1002 11:14:48.621743 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/93f15260-265f-4b6f-96c9-f1beb4556c59-scripts\") pod \"93f15260-265f-4b6f-96c9-f1beb4556c59\" (UID: 
\"93f15260-265f-4b6f-96c9-f1beb4556c59\") " Oct 02 11:14:48 crc kubenswrapper[4783]: I1002 11:14:48.621772 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8q588\" (UniqueName: \"kubernetes.io/projected/93f15260-265f-4b6f-96c9-f1beb4556c59-kube-api-access-8q588\") pod \"93f15260-265f-4b6f-96c9-f1beb4556c59\" (UID: \"93f15260-265f-4b6f-96c9-f1beb4556c59\") " Oct 02 11:14:48 crc kubenswrapper[4783]: I1002 11:14:48.621815 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/93f15260-265f-4b6f-96c9-f1beb4556c59-logs\") pod \"93f15260-265f-4b6f-96c9-f1beb4556c59\" (UID: \"93f15260-265f-4b6f-96c9-f1beb4556c59\") " Oct 02 11:14:48 crc kubenswrapper[4783]: I1002 11:14:48.622288 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/93f15260-265f-4b6f-96c9-f1beb4556c59-scripts" (OuterVolumeSpecName: "scripts") pod "93f15260-265f-4b6f-96c9-f1beb4556c59" (UID: "93f15260-265f-4b6f-96c9-f1beb4556c59"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:14:48 crc kubenswrapper[4783]: I1002 11:14:48.622379 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/93f15260-265f-4b6f-96c9-f1beb4556c59-config-data" (OuterVolumeSpecName: "config-data") pod "93f15260-265f-4b6f-96c9-f1beb4556c59" (UID: "93f15260-265f-4b6f-96c9-f1beb4556c59"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:14:48 crc kubenswrapper[4783]: I1002 11:14:48.623362 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/93f15260-265f-4b6f-96c9-f1beb4556c59-logs" (OuterVolumeSpecName: "logs") pod "93f15260-265f-4b6f-96c9-f1beb4556c59" (UID: "93f15260-265f-4b6f-96c9-f1beb4556c59"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:14:48 crc kubenswrapper[4783]: I1002 11:14:48.626339 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/93f15260-265f-4b6f-96c9-f1beb4556c59-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "93f15260-265f-4b6f-96c9-f1beb4556c59" (UID: "93f15260-265f-4b6f-96c9-f1beb4556c59"). InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:14:48 crc kubenswrapper[4783]: I1002 11:14:48.627056 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aea4b3d6-8814-424e-a0b0-2748b63f0bfd-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "aea4b3d6-8814-424e-a0b0-2748b63f0bfd" (UID: "aea4b3d6-8814-424e-a0b0-2748b63f0bfd"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:14:48 crc kubenswrapper[4783]: I1002 11:14:48.627735 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/93f15260-265f-4b6f-96c9-f1beb4556c59-kube-api-access-8q588" (OuterVolumeSpecName: "kube-api-access-8q588") pod "93f15260-265f-4b6f-96c9-f1beb4556c59" (UID: "93f15260-265f-4b6f-96c9-f1beb4556c59"). InnerVolumeSpecName "kube-api-access-8q588". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:14:48 crc kubenswrapper[4783]: I1002 11:14:48.628143 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aea4b3d6-8814-424e-a0b0-2748b63f0bfd-kube-api-access-qbhlz" (OuterVolumeSpecName: "kube-api-access-qbhlz") pod "aea4b3d6-8814-424e-a0b0-2748b63f0bfd" (UID: "aea4b3d6-8814-424e-a0b0-2748b63f0bfd"). InnerVolumeSpecName "kube-api-access-qbhlz". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:14:48 crc kubenswrapper[4783]: I1002 11:14:48.659060 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aea4b3d6-8814-424e-a0b0-2748b63f0bfd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "aea4b3d6-8814-424e-a0b0-2748b63f0bfd" (UID: "aea4b3d6-8814-424e-a0b0-2748b63f0bfd"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:14:48 crc kubenswrapper[4783]: I1002 11:14:48.684286 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aea4b3d6-8814-424e-a0b0-2748b63f0bfd-config-data" (OuterVolumeSpecName: "config-data") pod "aea4b3d6-8814-424e-a0b0-2748b63f0bfd" (UID: "aea4b3d6-8814-424e-a0b0-2748b63f0bfd"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:14:48 crc kubenswrapper[4783]: I1002 11:14:48.723507 4783 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/93f15260-265f-4b6f-96c9-f1beb4556c59-logs\") on node \"crc\" DevicePath \"\"" Oct 02 11:14:48 crc kubenswrapper[4783]: I1002 11:14:48.723542 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qbhlz\" (UniqueName: \"kubernetes.io/projected/aea4b3d6-8814-424e-a0b0-2748b63f0bfd-kube-api-access-qbhlz\") on node \"crc\" DevicePath \"\"" Oct 02 11:14:48 crc kubenswrapper[4783]: I1002 11:14:48.723553 4783 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aea4b3d6-8814-424e-a0b0-2748b63f0bfd-config-data\") on node \"crc\" DevicePath \"\"" Oct 02 11:14:48 crc kubenswrapper[4783]: I1002 11:14:48.723561 4783 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/93f15260-265f-4b6f-96c9-f1beb4556c59-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Oct 02 11:14:48 crc kubenswrapper[4783]: I1002 11:14:48.723570 4783 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/aea4b3d6-8814-424e-a0b0-2748b63f0bfd-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Oct 02 11:14:48 crc kubenswrapper[4783]: I1002 11:14:48.723578 4783 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/93f15260-265f-4b6f-96c9-f1beb4556c59-config-data\") on node \"crc\" DevicePath \"\"" Oct 02 11:14:48 crc kubenswrapper[4783]: I1002 11:14:48.723612 4783 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aea4b3d6-8814-424e-a0b0-2748b63f0bfd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 11:14:48 crc kubenswrapper[4783]: I1002 11:14:48.723621 4783 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/93f15260-265f-4b6f-96c9-f1beb4556c59-scripts\") on node \"crc\" DevicePath \"\"" Oct 02 11:14:48 crc 
kubenswrapper[4783]: I1002 11:14:48.723628 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8q588\" (UniqueName: \"kubernetes.io/projected/93f15260-265f-4b6f-96c9-f1beb4556c59-kube-api-access-8q588\") on node \"crc\" DevicePath \"\"" Oct 02 11:14:49 crc kubenswrapper[4783]: I1002 11:14:49.013563 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-547b75bc85-glzvn"] Oct 02 11:14:49 crc kubenswrapper[4783]: I1002 11:14:49.039040 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-547b75bc85-glzvn"] Oct 02 11:14:49 crc kubenswrapper[4783]: I1002 11:14:49.558594 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="93f15260-265f-4b6f-96c9-f1beb4556c59" path="/var/lib/kubelet/pods/93f15260-265f-4b6f-96c9-f1beb4556c59/volumes" Oct 02 11:14:49 crc kubenswrapper[4783]: I1002 11:14:49.964801 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8b5c85b87-clk5x"] Oct 02 11:14:49 crc kubenswrapper[4783]: E1002 11:14:49.965510 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aea4b3d6-8814-424e-a0b0-2748b63f0bfd" containerName="glance-db-sync" Oct 02 11:14:49 crc kubenswrapper[4783]: I1002 11:14:49.965526 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="aea4b3d6-8814-424e-a0b0-2748b63f0bfd" containerName="glance-db-sync" Oct 02 11:14:49 crc kubenswrapper[4783]: I1002 11:14:49.965759 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="aea4b3d6-8814-424e-a0b0-2748b63f0bfd" containerName="glance-db-sync" Oct 02 11:14:49 crc kubenswrapper[4783]: I1002 11:14:49.967818 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8b5c85b87-clk5x" Oct 02 11:14:49 crc kubenswrapper[4783]: I1002 11:14:49.989882 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8b5c85b87-clk5x"] Oct 02 11:14:50 crc kubenswrapper[4783]: I1002 11:14:50.086428 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d488b88d-66e6-40fc-a29e-ee119ebb5e40-ovsdbserver-nb\") pod \"dnsmasq-dns-8b5c85b87-clk5x\" (UID: \"d488b88d-66e6-40fc-a29e-ee119ebb5e40\") " pod="openstack/dnsmasq-dns-8b5c85b87-clk5x" Oct 02 11:14:50 crc kubenswrapper[4783]: I1002 11:14:50.086495 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d488b88d-66e6-40fc-a29e-ee119ebb5e40-config\") pod \"dnsmasq-dns-8b5c85b87-clk5x\" (UID: \"d488b88d-66e6-40fc-a29e-ee119ebb5e40\") " pod="openstack/dnsmasq-dns-8b5c85b87-clk5x" Oct 02 11:14:50 crc kubenswrapper[4783]: I1002 11:14:50.086533 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-spr72\" (UniqueName: \"kubernetes.io/projected/d488b88d-66e6-40fc-a29e-ee119ebb5e40-kube-api-access-spr72\") pod \"dnsmasq-dns-8b5c85b87-clk5x\" (UID: \"d488b88d-66e6-40fc-a29e-ee119ebb5e40\") " pod="openstack/dnsmasq-dns-8b5c85b87-clk5x" Oct 02 11:14:50 crc kubenswrapper[4783]: I1002 11:14:50.086596 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d488b88d-66e6-40fc-a29e-ee119ebb5e40-dns-svc\") pod \"dnsmasq-dns-8b5c85b87-clk5x\" (UID: \"d488b88d-66e6-40fc-a29e-ee119ebb5e40\") " pod="openstack/dnsmasq-dns-8b5c85b87-clk5x" Oct 02 
11:14:50 crc kubenswrapper[4783]: I1002 11:14:50.086632 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d488b88d-66e6-40fc-a29e-ee119ebb5e40-dns-swift-storage-0\") pod \"dnsmasq-dns-8b5c85b87-clk5x\" (UID: \"d488b88d-66e6-40fc-a29e-ee119ebb5e40\") " pod="openstack/dnsmasq-dns-8b5c85b87-clk5x" Oct 02 11:14:50 crc kubenswrapper[4783]: I1002 11:14:50.086692 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d488b88d-66e6-40fc-a29e-ee119ebb5e40-ovsdbserver-sb\") pod \"dnsmasq-dns-8b5c85b87-clk5x\" (UID: \"d488b88d-66e6-40fc-a29e-ee119ebb5e40\") " pod="openstack/dnsmasq-dns-8b5c85b87-clk5x" Oct 02 11:14:50 crc kubenswrapper[4783]: I1002 11:14:50.189481 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d488b88d-66e6-40fc-a29e-ee119ebb5e40-ovsdbserver-sb\") pod \"dnsmasq-dns-8b5c85b87-clk5x\" (UID: \"d488b88d-66e6-40fc-a29e-ee119ebb5e40\") " pod="openstack/dnsmasq-dns-8b5c85b87-clk5x" Oct 02 11:14:50 crc kubenswrapper[4783]: I1002 11:14:50.189567 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d488b88d-66e6-40fc-a29e-ee119ebb5e40-ovsdbserver-nb\") pod \"dnsmasq-dns-8b5c85b87-clk5x\" (UID: \"d488b88d-66e6-40fc-a29e-ee119ebb5e40\") " pod="openstack/dnsmasq-dns-8b5c85b87-clk5x" Oct 02 11:14:50 crc kubenswrapper[4783]: I1002 11:14:50.189602 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d488b88d-66e6-40fc-a29e-ee119ebb5e40-config\") pod \"dnsmasq-dns-8b5c85b87-clk5x\" (UID: \"d488b88d-66e6-40fc-a29e-ee119ebb5e40\") " pod="openstack/dnsmasq-dns-8b5c85b87-clk5x" Oct 02 11:14:50 crc kubenswrapper[4783]: I1002 11:14:50.189625 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-spr72\" (UniqueName: \"kubernetes.io/projected/d488b88d-66e6-40fc-a29e-ee119ebb5e40-kube-api-access-spr72\") pod \"dnsmasq-dns-8b5c85b87-clk5x\" (UID: \"d488b88d-66e6-40fc-a29e-ee119ebb5e40\") " pod="openstack/dnsmasq-dns-8b5c85b87-clk5x" Oct 02 11:14:50 crc kubenswrapper[4783]: I1002 11:14:50.189688 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d488b88d-66e6-40fc-a29e-ee119ebb5e40-dns-svc\") pod \"dnsmasq-dns-8b5c85b87-clk5x\" (UID: \"d488b88d-66e6-40fc-a29e-ee119ebb5e40\") " pod="openstack/dnsmasq-dns-8b5c85b87-clk5x" Oct 02 11:14:50 crc kubenswrapper[4783]: I1002 11:14:50.189724 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d488b88d-66e6-40fc-a29e-ee119ebb5e40-dns-swift-storage-0\") pod \"dnsmasq-dns-8b5c85b87-clk5x\" (UID: \"d488b88d-66e6-40fc-a29e-ee119ebb5e40\") " pod="openstack/dnsmasq-dns-8b5c85b87-clk5x" Oct 02 11:14:50 crc kubenswrapper[4783]: I1002 11:14:50.190465 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d488b88d-66e6-40fc-a29e-ee119ebb5e40-config\") pod \"dnsmasq-dns-8b5c85b87-clk5x\" (UID: \"d488b88d-66e6-40fc-a29e-ee119ebb5e40\") " pod="openstack/dnsmasq-dns-8b5c85b87-clk5x" Oct 02 11:14:50 crc kubenswrapper[4783]: I1002 
11:14:50.190474 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d488b88d-66e6-40fc-a29e-ee119ebb5e40-dns-swift-storage-0\") pod \"dnsmasq-dns-8b5c85b87-clk5x\" (UID: \"d488b88d-66e6-40fc-a29e-ee119ebb5e40\") " pod="openstack/dnsmasq-dns-8b5c85b87-clk5x" Oct 02 11:14:50 crc kubenswrapper[4783]: I1002 11:14:50.190500 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d488b88d-66e6-40fc-a29e-ee119ebb5e40-ovsdbserver-nb\") pod \"dnsmasq-dns-8b5c85b87-clk5x\" (UID: \"d488b88d-66e6-40fc-a29e-ee119ebb5e40\") " pod="openstack/dnsmasq-dns-8b5c85b87-clk5x" Oct 02 11:14:50 crc kubenswrapper[4783]: I1002 11:14:50.190777 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d488b88d-66e6-40fc-a29e-ee119ebb5e40-dns-svc\") pod \"dnsmasq-dns-8b5c85b87-clk5x\" (UID: \"d488b88d-66e6-40fc-a29e-ee119ebb5e40\") " pod="openstack/dnsmasq-dns-8b5c85b87-clk5x" Oct 02 11:14:50 crc kubenswrapper[4783]: I1002 11:14:50.191461 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d488b88d-66e6-40fc-a29e-ee119ebb5e40-ovsdbserver-sb\") pod \"dnsmasq-dns-8b5c85b87-clk5x\" (UID: \"d488b88d-66e6-40fc-a29e-ee119ebb5e40\") " pod="openstack/dnsmasq-dns-8b5c85b87-clk5x" Oct 02 11:14:50 crc kubenswrapper[4783]: I1002 11:14:50.224484 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-spr72\" (UniqueName: \"kubernetes.io/projected/d488b88d-66e6-40fc-a29e-ee119ebb5e40-kube-api-access-spr72\") pod \"dnsmasq-dns-8b5c85b87-clk5x\" (UID: \"d488b88d-66e6-40fc-a29e-ee119ebb5e40\") " pod="openstack/dnsmasq-dns-8b5c85b87-clk5x" Oct 02 11:14:50 crc kubenswrapper[4783]: I1002 11:14:50.297021 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8b5c85b87-clk5x" Oct 02 11:14:50 crc kubenswrapper[4783]: E1002 11:14:50.595290 4783 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified" Oct 02 11:14:50 crc kubenswrapper[4783]: E1002 11:14:50.595498 4783 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jscxl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-sqsnf_openstack(9e4fb56d-2565-4383-a883-a0c1eae40cb4): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Oct 02 11:14:50 crc kubenswrapper[4783]: E1002 11:14:50.596816 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-sqsnf" podUID="9e4fb56d-2565-4383-a883-a0c1eae40cb4" Oct 02 11:14:50 crc kubenswrapper[4783]: I1002 11:14:50.625780 4783 scope.go:117] "RemoveContainer" 
containerID="ce7d2f53a05d990cd59dc7d3d4b2f3bf9e3f68b43e4cb648f6c6873d9ed39a0f" Oct 02 11:14:50 crc kubenswrapper[4783]: I1002 11:14:50.863582 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Oct 02 11:14:50 crc kubenswrapper[4783]: I1002 11:14:50.866060 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Oct 02 11:14:50 crc kubenswrapper[4783]: I1002 11:14:50.869783 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-5lt5x" Oct 02 11:14:50 crc kubenswrapper[4783]: I1002 11:14:50.869982 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts" Oct 02 11:14:50 crc kubenswrapper[4783]: I1002 11:14:50.870171 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Oct 02 11:14:50 crc kubenswrapper[4783]: I1002 11:14:50.886146 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.006623 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f36ac44-6c86-4de4-9401-bf07f8907c19-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"6f36ac44-6c86-4de4-9401-bf07f8907c19\") " pod="openstack/glance-default-external-api-0" Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.006930 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f36ac44-6c86-4de4-9401-bf07f8907c19-config-data\") pod \"glance-default-external-api-0\" (UID: \"6f36ac44-6c86-4de4-9401-bf07f8907c19\") " pod="openstack/glance-default-external-api-0" Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.006956 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6f36ac44-6c86-4de4-9401-bf07f8907c19-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"6f36ac44-6c86-4de4-9401-bf07f8907c19\") " pod="openstack/glance-default-external-api-0" Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.006989 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"6f36ac44-6c86-4de4-9401-bf07f8907c19\") " pod="openstack/glance-default-external-api-0" Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.007010 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6f36ac44-6c86-4de4-9401-bf07f8907c19-logs\") pod \"glance-default-external-api-0\" (UID: \"6f36ac44-6c86-4de4-9401-bf07f8907c19\") " pod="openstack/glance-default-external-api-0" Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.007035 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6f36ac44-6c86-4de4-9401-bf07f8907c19-scripts\") pod \"glance-default-external-api-0\" (UID: \"6f36ac44-6c86-4de4-9401-bf07f8907c19\") " pod="openstack/glance-default-external-api-0" Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.007074 
4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wbgzq\" (UniqueName: \"kubernetes.io/projected/6f36ac44-6c86-4de4-9401-bf07f8907c19-kube-api-access-wbgzq\") pod \"glance-default-external-api-0\" (UID: \"6f36ac44-6c86-4de4-9401-bf07f8907c19\") " pod="openstack/glance-default-external-api-0" Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.108508 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"6f36ac44-6c86-4de4-9401-bf07f8907c19\") " pod="openstack/glance-default-external-api-0" Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.108552 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6f36ac44-6c86-4de4-9401-bf07f8907c19-logs\") pod \"glance-default-external-api-0\" (UID: \"6f36ac44-6c86-4de4-9401-bf07f8907c19\") " pod="openstack/glance-default-external-api-0" Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.108578 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6f36ac44-6c86-4de4-9401-bf07f8907c19-scripts\") pod \"glance-default-external-api-0\" (UID: \"6f36ac44-6c86-4de4-9401-bf07f8907c19\") " pod="openstack/glance-default-external-api-0" Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.108617 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wbgzq\" (UniqueName: \"kubernetes.io/projected/6f36ac44-6c86-4de4-9401-bf07f8907c19-kube-api-access-wbgzq\") pod \"glance-default-external-api-0\" (UID: \"6f36ac44-6c86-4de4-9401-bf07f8907c19\") " pod="openstack/glance-default-external-api-0" Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.108740 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f36ac44-6c86-4de4-9401-bf07f8907c19-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"6f36ac44-6c86-4de4-9401-bf07f8907c19\") " pod="openstack/glance-default-external-api-0" Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.108778 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f36ac44-6c86-4de4-9401-bf07f8907c19-config-data\") pod \"glance-default-external-api-0\" (UID: \"6f36ac44-6c86-4de4-9401-bf07f8907c19\") " pod="openstack/glance-default-external-api-0" Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.108803 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6f36ac44-6c86-4de4-9401-bf07f8907c19-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"6f36ac44-6c86-4de4-9401-bf07f8907c19\") " pod="openstack/glance-default-external-api-0" Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.109237 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6f36ac44-6c86-4de4-9401-bf07f8907c19-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"6f36ac44-6c86-4de4-9401-bf07f8907c19\") " pod="openstack/glance-default-external-api-0" Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.109555 4783 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume 
\"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"6f36ac44-6c86-4de4-9401-bf07f8907c19\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/glance-default-external-api-0" Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.111291 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6f36ac44-6c86-4de4-9401-bf07f8907c19-logs\") pod \"glance-default-external-api-0\" (UID: \"6f36ac44-6c86-4de4-9401-bf07f8907c19\") " pod="openstack/glance-default-external-api-0" Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.115581 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f36ac44-6c86-4de4-9401-bf07f8907c19-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"6f36ac44-6c86-4de4-9401-bf07f8907c19\") " pod="openstack/glance-default-external-api-0" Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.119905 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6f36ac44-6c86-4de4-9401-bf07f8907c19-scripts\") pod \"glance-default-external-api-0\" (UID: \"6f36ac44-6c86-4de4-9401-bf07f8907c19\") " pod="openstack/glance-default-external-api-0" Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.125689 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f36ac44-6c86-4de4-9401-bf07f8907c19-config-data\") pod \"glance-default-external-api-0\" (UID: \"6f36ac44-6c86-4de4-9401-bf07f8907c19\") " pod="openstack/glance-default-external-api-0" Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.145756 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wbgzq\" (UniqueName: \"kubernetes.io/projected/6f36ac44-6c86-4de4-9401-bf07f8907c19-kube-api-access-wbgzq\") pod \"glance-default-external-api-0\" (UID: \"6f36ac44-6c86-4de4-9401-bf07f8907c19\") " pod="openstack/glance-default-external-api-0" Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.181345 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.192091 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.195606 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.201703 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.208272 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"6f36ac44-6c86-4de4-9401-bf07f8907c19\") " pod="openstack/glance-default-external-api-0" Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.246200 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-5fcdf587dd-wvthh"] Oct 02 11:14:51 crc kubenswrapper[4783]: W1002 11:14:51.281928 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8abc2e1e_de94_4880_8a75_0c7ee0a2cdba.slice/crio-f71c1c3a0a71f25968d4e14e51f8b0e7503b8a2996217ec14dca02b29bce2a6b WatchSource:0}: Error finding container f71c1c3a0a71f25968d4e14e51f8b0e7503b8a2996217ec14dca02b29bce2a6b: Status 404 returned error can't find the container with id f71c1c3a0a71f25968d4e14e51f8b0e7503b8a2996217ec14dca02b29bce2a6b Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.319901 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3562137-37ec-46c4-a696-e4541b41c6a5-config-data\") pod \"glance-default-internal-api-0\" (UID: \"c3562137-37ec-46c4-a696-e4541b41c6a5\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.320100 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c3562137-37ec-46c4-a696-e4541b41c6a5-logs\") pod \"glance-default-internal-api-0\" (UID: \"c3562137-37ec-46c4-a696-e4541b41c6a5\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.320215 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-internal-api-0\" (UID: \"c3562137-37ec-46c4-a696-e4541b41c6a5\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.320236 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3562137-37ec-46c4-a696-e4541b41c6a5-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"c3562137-37ec-46c4-a696-e4541b41c6a5\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.320442 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c3562137-37ec-46c4-a696-e4541b41c6a5-scripts\") pod \"glance-default-internal-api-0\" (UID: \"c3562137-37ec-46c4-a696-e4541b41c6a5\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.320462 4783 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bmdtw\" (UniqueName: \"kubernetes.io/projected/c3562137-37ec-46c4-a696-e4541b41c6a5-kube-api-access-bmdtw\") pod \"glance-default-internal-api-0\" (UID: \"c3562137-37ec-46c4-a696-e4541b41c6a5\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.320510 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c3562137-37ec-46c4-a696-e4541b41c6a5-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"c3562137-37ec-46c4-a696-e4541b41c6a5\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.339717 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/horizon-567b57d86d-gv6fq"] Oct 02 11:14:51 crc kubenswrapper[4783]: W1002 11:14:51.359195 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod48c11fb6_76f0_4028_a76f_6f67904bf3aa.slice/crio-3226d7c7ad6790c403ab30525395a387ed505134f6672b381b04ece760827b56 WatchSource:0}: Error finding container 3226d7c7ad6790c403ab30525395a387ed505134f6672b381b04ece760827b56: Status 404 returned error can't find the container with id 3226d7c7ad6790c403ab30525395a387ed505134f6672b381b04ece760827b56 Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.424169 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c3562137-37ec-46c4-a696-e4541b41c6a5-scripts\") pod \"glance-default-internal-api-0\" (UID: \"c3562137-37ec-46c4-a696-e4541b41c6a5\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.424231 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bmdtw\" (UniqueName: \"kubernetes.io/projected/c3562137-37ec-46c4-a696-e4541b41c6a5-kube-api-access-bmdtw\") pod \"glance-default-internal-api-0\" (UID: \"c3562137-37ec-46c4-a696-e4541b41c6a5\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.424276 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c3562137-37ec-46c4-a696-e4541b41c6a5-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"c3562137-37ec-46c4-a696-e4541b41c6a5\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.424314 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3562137-37ec-46c4-a696-e4541b41c6a5-config-data\") pod \"glance-default-internal-api-0\" (UID: \"c3562137-37ec-46c4-a696-e4541b41c6a5\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.424377 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c3562137-37ec-46c4-a696-e4541b41c6a5-logs\") pod \"glance-default-internal-api-0\" (UID: \"c3562137-37ec-46c4-a696-e4541b41c6a5\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.424436 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-internal-api-0\" (UID: \"c3562137-37ec-46c4-a696-e4541b41c6a5\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.424457 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3562137-37ec-46c4-a696-e4541b41c6a5-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"c3562137-37ec-46c4-a696-e4541b41c6a5\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.426571 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c3562137-37ec-46c4-a696-e4541b41c6a5-logs\") pod \"glance-default-internal-api-0\" (UID: \"c3562137-37ec-46c4-a696-e4541b41c6a5\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.426817 4783 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-internal-api-0\" (UID: \"c3562137-37ec-46c4-a696-e4541b41c6a5\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/glance-default-internal-api-0" Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.427835 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c3562137-37ec-46c4-a696-e4541b41c6a5-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"c3562137-37ec-46c4-a696-e4541b41c6a5\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.432375 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c3562137-37ec-46c4-a696-e4541b41c6a5-scripts\") pod \"glance-default-internal-api-0\" (UID: \"c3562137-37ec-46c4-a696-e4541b41c6a5\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.432658 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3562137-37ec-46c4-a696-e4541b41c6a5-config-data\") pod \"glance-default-internal-api-0\" (UID: \"c3562137-37ec-46c4-a696-e4541b41c6a5\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.443694 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bmdtw\" (UniqueName: \"kubernetes.io/projected/c3562137-37ec-46c4-a696-e4541b41c6a5-kube-api-access-bmdtw\") pod \"glance-default-internal-api-0\" (UID: \"c3562137-37ec-46c4-a696-e4541b41c6a5\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.450519 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-bw8mq"] Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.454726 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3562137-37ec-46c4-a696-e4541b41c6a5-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"c3562137-37ec-46c4-a696-e4541b41c6a5\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.471142 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-internal-api-0\" (UID: \"c3562137-37ec-46c4-a696-e4541b41c6a5\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.506360 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.514810 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.514877 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.558813 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8b5c85b87-clk5x"] Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.589818 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-bw8mq" event={"ID":"2f856246-d4db-48e2-81ec-b756ceba0667","Type":"ContainerStarted","Data":"7a07a62448795fb0f1aef403c911e2ab754085d940f7cd6997a0196469f1293c"} Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.591135 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-567b57d86d-gv6fq" event={"ID":"48c11fb6-76f0-4028-a76f-6f67904bf3aa","Type":"ContainerStarted","Data":"3226d7c7ad6790c403ab30525395a387ed505134f6672b381b04ece760827b56"} Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.593142 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-l9zch" event={"ID":"6c42c3ba-c130-4b8d-940a-9aa134629554","Type":"ContainerStarted","Data":"d0393a0ce247d27be012699cdac67d87ae42e593c65a68d13ea245fd10b74bc7"} Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.597230 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5fcdf587dd-wvthh" event={"ID":"8abc2e1e-de94-4880-8a75-0c7ee0a2cdba","Type":"ContainerStarted","Data":"f71c1c3a0a71f25968d4e14e51f8b0e7503b8a2996217ec14dca02b29bce2a6b"} Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.599223 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-g9szh" event={"ID":"68c31bf9-b59a-43ed-bb74-9e6cc0bce703","Type":"ContainerStarted","Data":"3bc304ac88d2823182da9a993a1fdbce5020a427e3173b94a6214924e530d810"} Oct 02 11:14:51 crc kubenswrapper[4783]: E1002 11:14:51.600063 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api:current-podified\\\"\"" pod="openstack/cinder-db-sync-sqsnf" podUID="9e4fb56d-2565-4383-a883-a0c1eae40cb4" Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.615365 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-l9zch" podStartSLOduration=2.8083054819999997 podStartE2EDuration="48.615349969s" podCreationTimestamp="2025-10-02 11:14:03 
+0000 UTC" firstStartedPulling="2025-10-02 11:14:05.153010204 +0000 UTC m=+1278.469204465" lastFinishedPulling="2025-10-02 11:14:50.960054691 +0000 UTC m=+1324.276248952" observedRunningTime="2025-10-02 11:14:51.609077958 +0000 UTC m=+1324.925272229" watchObservedRunningTime="2025-10-02 11:14:51.615349969 +0000 UTC m=+1324.931544220" Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.629883 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-g9szh" podStartSLOduration=2.971564719 podStartE2EDuration="48.629844435s" podCreationTimestamp="2025-10-02 11:14:03 +0000 UTC" firstStartedPulling="2025-10-02 11:14:05.381482761 +0000 UTC m=+1278.697677022" lastFinishedPulling="2025-10-02 11:14:51.039762477 +0000 UTC m=+1324.355956738" observedRunningTime="2025-10-02 11:14:51.625689902 +0000 UTC m=+1324.941884153" watchObservedRunningTime="2025-10-02 11:14:51.629844435 +0000 UTC m=+1324.946038696" Oct 02 11:14:51 crc kubenswrapper[4783]: I1002 11:14:51.707781 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Oct 02 11:14:52 crc kubenswrapper[4783]: W1002 11:14:52.019396 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd488b88d_66e6_40fc_a29e_ee119ebb5e40.slice/crio-70720cc87c413f7f274df859330451a2ad62e102ce5ad0ca61a41a7c7c2dcb70 WatchSource:0}: Error finding container 70720cc87c413f7f274df859330451a2ad62e102ce5ad0ca61a41a7c7c2dcb70: Status 404 returned error can't find the container with id 70720cc87c413f7f274df859330451a2ad62e102ce5ad0ca61a41a7c7c2dcb70 Oct 02 11:14:52 crc kubenswrapper[4783]: I1002 11:14:52.512501 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Oct 02 11:14:52 crc kubenswrapper[4783]: I1002 11:14:52.655258 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 02 11:14:52 crc kubenswrapper[4783]: I1002 11:14:52.676217 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b5c85b87-clk5x" event={"ID":"d488b88d-66e6-40fc-a29e-ee119ebb5e40","Type":"ContainerStarted","Data":"70720cc87c413f7f274df859330451a2ad62e102ce5ad0ca61a41a7c7c2dcb70"} Oct 02 11:14:52 crc kubenswrapper[4783]: I1002 11:14:52.761945 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 02 11:14:53 crc kubenswrapper[4783]: I1002 11:14:53.048973 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Oct 02 11:14:53 crc kubenswrapper[4783]: I1002 11:14:53.685429 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"193c55b5-888f-4738-b8f6-c075d2b396a5","Type":"ContainerStarted","Data":"d6ca1a25e8a2b171e26d7e7cf65572d13be61eca7af3dba25dabbe62e8259bb6"} Oct 02 11:14:53 crc kubenswrapper[4783]: I1002 11:14:53.686909 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"c3562137-37ec-46c4-a696-e4541b41c6a5","Type":"ContainerStarted","Data":"37681c4c126a143047245c3dc923e0b80f59c52169e400ee6eedfc349cf916de"} Oct 02 11:14:53 crc kubenswrapper[4783]: I1002 11:14:53.693373 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-567b57d86d-gv6fq" 
event={"ID":"48c11fb6-76f0-4028-a76f-6f67904bf3aa","Type":"ContainerStarted","Data":"58153738b4583103c9b6b2a35fefcf7fa1e4c224a6092baa570c4337ad49f3cb"} Oct 02 11:14:53 crc kubenswrapper[4783]: I1002 11:14:53.693437 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-567b57d86d-gv6fq" event={"ID":"48c11fb6-76f0-4028-a76f-6f67904bf3aa","Type":"ContainerStarted","Data":"cfbd4fde1e63b62d946a09433f0e449e44368d2cfe5f4addf9ed7e8516dfecd0"} Oct 02 11:14:53 crc kubenswrapper[4783]: I1002 11:14:53.701230 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"6f36ac44-6c86-4de4-9401-bf07f8907c19","Type":"ContainerStarted","Data":"3d518523afb8a573af45c326c477c25920fe914c3ce85de1b0d5b3a252c82f4e"} Oct 02 11:14:53 crc kubenswrapper[4783]: I1002 11:14:53.704509 4783 generic.go:334] "Generic (PLEG): container finished" podID="d488b88d-66e6-40fc-a29e-ee119ebb5e40" containerID="d4aecfc3bdc72e2b4e8b460a13d3dc06ba0535ba98016466dbff4c2df0da31ce" exitCode=0 Oct 02 11:14:53 crc kubenswrapper[4783]: I1002 11:14:53.704644 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b5c85b87-clk5x" event={"ID":"d488b88d-66e6-40fc-a29e-ee119ebb5e40","Type":"ContainerDied","Data":"d4aecfc3bdc72e2b4e8b460a13d3dc06ba0535ba98016466dbff4c2df0da31ce"} Oct 02 11:14:53 crc kubenswrapper[4783]: I1002 11:14:53.707973 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5fcdf587dd-wvthh" event={"ID":"8abc2e1e-de94-4880-8a75-0c7ee0a2cdba","Type":"ContainerStarted","Data":"17dc5053f7eb1f95b75a44af44ad2579d939fa2adfc83af523f7bb53acc883e3"} Oct 02 11:14:53 crc kubenswrapper[4783]: I1002 11:14:53.708001 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5fcdf587dd-wvthh" event={"ID":"8abc2e1e-de94-4880-8a75-0c7ee0a2cdba","Type":"ContainerStarted","Data":"617e73daab1fa7d04c236e9a25fd46b5542b5fb3258901a035760e38676e12d9"} Oct 02 11:14:53 crc kubenswrapper[4783]: I1002 11:14:53.713707 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-bw8mq" event={"ID":"2f856246-d4db-48e2-81ec-b756ceba0667","Type":"ContainerStarted","Data":"17134fa1c0bb54795ef7e71f2b048d8f0c6857876242e93c430e358361994184"} Oct 02 11:14:53 crc kubenswrapper[4783]: I1002 11:14:53.728881 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-567b57d86d-gv6fq" podStartSLOduration=40.708153231 podStartE2EDuration="41.728860973s" podCreationTimestamp="2025-10-02 11:14:12 +0000 UTC" firstStartedPulling="2025-10-02 11:14:51.36114791 +0000 UTC m=+1324.677342171" lastFinishedPulling="2025-10-02 11:14:52.381855652 +0000 UTC m=+1325.698049913" observedRunningTime="2025-10-02 11:14:53.721217544 +0000 UTC m=+1327.037411805" watchObservedRunningTime="2025-10-02 11:14:53.728860973 +0000 UTC m=+1327.045055224" Oct 02 11:14:53 crc kubenswrapper[4783]: I1002 11:14:53.758207 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-bw8mq" podStartSLOduration=6.758184633 podStartE2EDuration="6.758184633s" podCreationTimestamp="2025-10-02 11:14:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:14:53.75291903 +0000 UTC m=+1327.069113291" watchObservedRunningTime="2025-10-02 11:14:53.758184633 +0000 UTC m=+1327.074378904" Oct 02 11:14:53 crc kubenswrapper[4783]: I1002 11:14:53.777378 4783 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/horizon-5fcdf587dd-wvthh" podStartSLOduration=40.724015734 podStartE2EDuration="41.777361117s" podCreationTimestamp="2025-10-02 11:14:12 +0000 UTC" firstStartedPulling="2025-10-02 11:14:51.30765013 +0000 UTC m=+1324.623844391" lastFinishedPulling="2025-10-02 11:14:52.360995513 +0000 UTC m=+1325.677189774" observedRunningTime="2025-10-02 11:14:53.775844895 +0000 UTC m=+1327.092039156" watchObservedRunningTime="2025-10-02 11:14:53.777361117 +0000 UTC m=+1327.093555378" Oct 02 11:14:54 crc kubenswrapper[4783]: I1002 11:14:54.727970 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"c3562137-37ec-46c4-a696-e4541b41c6a5","Type":"ContainerStarted","Data":"f0004ca112422eb0bcf4114ffd1bd3d9d727e5f691d9f91243ec8754a4cec0f9"} Oct 02 11:14:54 crc kubenswrapper[4783]: I1002 11:14:54.730020 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"6f36ac44-6c86-4de4-9401-bf07f8907c19","Type":"ContainerStarted","Data":"e379f2a833ca812e2c028d2defe8b0ed8c990e0c515a9ec9fbc722ee6b6d5167"} Oct 02 11:14:54 crc kubenswrapper[4783]: I1002 11:14:54.731952 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b5c85b87-clk5x" event={"ID":"d488b88d-66e6-40fc-a29e-ee119ebb5e40","Type":"ContainerStarted","Data":"f197b1f9f36ef3599662bf9bb2a4565c7fda88d7193ca8963ecb1d6a49c72956"} Oct 02 11:14:54 crc kubenswrapper[4783]: I1002 11:14:54.755330 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-8b5c85b87-clk5x" podStartSLOduration=5.7553119630000005 podStartE2EDuration="5.755311963s" podCreationTimestamp="2025-10-02 11:14:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:14:54.751760306 +0000 UTC m=+1328.067954567" watchObservedRunningTime="2025-10-02 11:14:54.755311963 +0000 UTC m=+1328.071506224" Oct 02 11:14:55 crc kubenswrapper[4783]: I1002 11:14:55.297373 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-8b5c85b87-clk5x" Oct 02 11:14:55 crc kubenswrapper[4783]: I1002 11:14:55.759944 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"6f36ac44-6c86-4de4-9401-bf07f8907c19","Type":"ContainerStarted","Data":"fa98dd43740b83a4169798c047c27776ab8ccb1660625c2ae6bc87949100d6ec"} Oct 02 11:14:55 crc kubenswrapper[4783]: I1002 11:14:55.760154 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="6f36ac44-6c86-4de4-9401-bf07f8907c19" containerName="glance-log" containerID="cri-o://e379f2a833ca812e2c028d2defe8b0ed8c990e0c515a9ec9fbc722ee6b6d5167" gracePeriod=30 Oct 02 11:14:55 crc kubenswrapper[4783]: I1002 11:14:55.760252 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="6f36ac44-6c86-4de4-9401-bf07f8907c19" containerName="glance-httpd" containerID="cri-o://fa98dd43740b83a4169798c047c27776ab8ccb1660625c2ae6bc87949100d6ec" gracePeriod=30 Oct 02 11:14:55 crc kubenswrapper[4783]: I1002 11:14:55.773180 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" 
event={"ID":"c3562137-37ec-46c4-a696-e4541b41c6a5","Type":"ContainerStarted","Data":"6e14e9fd0cc04ff1444703373737289f16f7b2274a56ef54955e7b80ffe25ca2"} Oct 02 11:14:55 crc kubenswrapper[4783]: I1002 11:14:55.773232 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="c3562137-37ec-46c4-a696-e4541b41c6a5" containerName="glance-log" containerID="cri-o://f0004ca112422eb0bcf4114ffd1bd3d9d727e5f691d9f91243ec8754a4cec0f9" gracePeriod=30 Oct 02 11:14:55 crc kubenswrapper[4783]: I1002 11:14:55.773255 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="c3562137-37ec-46c4-a696-e4541b41c6a5" containerName="glance-httpd" containerID="cri-o://6e14e9fd0cc04ff1444703373737289f16f7b2274a56ef54955e7b80ffe25ca2" gracePeriod=30 Oct 02 11:14:55 crc kubenswrapper[4783]: I1002 11:14:55.830467 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=6.830430902 podStartE2EDuration="6.830430902s" podCreationTimestamp="2025-10-02 11:14:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:14:55.791031946 +0000 UTC m=+1329.107226217" watchObservedRunningTime="2025-10-02 11:14:55.830430902 +0000 UTC m=+1329.146625163" Oct 02 11:14:55 crc kubenswrapper[4783]: I1002 11:14:55.836049 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=5.836030834 podStartE2EDuration="5.836030834s" podCreationTimestamp="2025-10-02 11:14:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:14:55.828398706 +0000 UTC m=+1329.144592967" watchObservedRunningTime="2025-10-02 11:14:55.836030834 +0000 UTC m=+1329.152225085" Oct 02 11:14:56 crc kubenswrapper[4783]: I1002 11:14:56.783221 4783 generic.go:334] "Generic (PLEG): container finished" podID="c3562137-37ec-46c4-a696-e4541b41c6a5" containerID="6e14e9fd0cc04ff1444703373737289f16f7b2274a56ef54955e7b80ffe25ca2" exitCode=143 Oct 02 11:14:56 crc kubenswrapper[4783]: I1002 11:14:56.783248 4783 generic.go:334] "Generic (PLEG): container finished" podID="c3562137-37ec-46c4-a696-e4541b41c6a5" containerID="f0004ca112422eb0bcf4114ffd1bd3d9d727e5f691d9f91243ec8754a4cec0f9" exitCode=143 Oct 02 11:14:56 crc kubenswrapper[4783]: I1002 11:14:56.783291 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"c3562137-37ec-46c4-a696-e4541b41c6a5","Type":"ContainerDied","Data":"6e14e9fd0cc04ff1444703373737289f16f7b2274a56ef54955e7b80ffe25ca2"} Oct 02 11:14:56 crc kubenswrapper[4783]: I1002 11:14:56.783319 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"c3562137-37ec-46c4-a696-e4541b41c6a5","Type":"ContainerDied","Data":"f0004ca112422eb0bcf4114ffd1bd3d9d727e5f691d9f91243ec8754a4cec0f9"} Oct 02 11:14:56 crc kubenswrapper[4783]: I1002 11:14:56.785804 4783 generic.go:334] "Generic (PLEG): container finished" podID="6f36ac44-6c86-4de4-9401-bf07f8907c19" containerID="fa98dd43740b83a4169798c047c27776ab8ccb1660625c2ae6bc87949100d6ec" exitCode=143 Oct 02 11:14:56 crc kubenswrapper[4783]: I1002 11:14:56.785823 4783 generic.go:334] "Generic (PLEG): container finished" 
podID="6f36ac44-6c86-4de4-9401-bf07f8907c19" containerID="e379f2a833ca812e2c028d2defe8b0ed8c990e0c515a9ec9fbc722ee6b6d5167" exitCode=143 Oct 02 11:14:56 crc kubenswrapper[4783]: I1002 11:14:56.785844 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"6f36ac44-6c86-4de4-9401-bf07f8907c19","Type":"ContainerDied","Data":"fa98dd43740b83a4169798c047c27776ab8ccb1660625c2ae6bc87949100d6ec"} Oct 02 11:14:56 crc kubenswrapper[4783]: I1002 11:14:56.785894 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"6f36ac44-6c86-4de4-9401-bf07f8907c19","Type":"ContainerDied","Data":"e379f2a833ca812e2c028d2defe8b0ed8c990e0c515a9ec9fbc722ee6b6d5167"} Oct 02 11:15:00 crc kubenswrapper[4783]: I1002 11:15:00.135872 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29323395-9gvqg"] Oct 02 11:15:00 crc kubenswrapper[4783]: I1002 11:15:00.137245 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29323395-9gvqg" Oct 02 11:15:00 crc kubenswrapper[4783]: I1002 11:15:00.144682 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Oct 02 11:15:00 crc kubenswrapper[4783]: I1002 11:15:00.144975 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Oct 02 11:15:00 crc kubenswrapper[4783]: I1002 11:15:00.151822 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29323395-9gvqg"] Oct 02 11:15:00 crc kubenswrapper[4783]: I1002 11:15:00.216840 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0af4bab9-5198-4d2c-a811-17db77304d40-config-volume\") pod \"collect-profiles-29323395-9gvqg\" (UID: \"0af4bab9-5198-4d2c-a811-17db77304d40\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323395-9gvqg" Oct 02 11:15:00 crc kubenswrapper[4783]: I1002 11:15:00.216903 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xbjx7\" (UniqueName: \"kubernetes.io/projected/0af4bab9-5198-4d2c-a811-17db77304d40-kube-api-access-xbjx7\") pod \"collect-profiles-29323395-9gvqg\" (UID: \"0af4bab9-5198-4d2c-a811-17db77304d40\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323395-9gvqg" Oct 02 11:15:00 crc kubenswrapper[4783]: I1002 11:15:00.217001 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0af4bab9-5198-4d2c-a811-17db77304d40-secret-volume\") pod \"collect-profiles-29323395-9gvqg\" (UID: \"0af4bab9-5198-4d2c-a811-17db77304d40\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323395-9gvqg" Oct 02 11:15:00 crc kubenswrapper[4783]: I1002 11:15:00.302626 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-8b5c85b87-clk5x" Oct 02 11:15:00 crc kubenswrapper[4783]: I1002 11:15:00.320505 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0af4bab9-5198-4d2c-a811-17db77304d40-config-volume\") pod 
\"collect-profiles-29323395-9gvqg\" (UID: \"0af4bab9-5198-4d2c-a811-17db77304d40\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323395-9gvqg" Oct 02 11:15:00 crc kubenswrapper[4783]: I1002 11:15:00.320625 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xbjx7\" (UniqueName: \"kubernetes.io/projected/0af4bab9-5198-4d2c-a811-17db77304d40-kube-api-access-xbjx7\") pod \"collect-profiles-29323395-9gvqg\" (UID: \"0af4bab9-5198-4d2c-a811-17db77304d40\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323395-9gvqg" Oct 02 11:15:00 crc kubenswrapper[4783]: I1002 11:15:00.320843 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0af4bab9-5198-4d2c-a811-17db77304d40-secret-volume\") pod \"collect-profiles-29323395-9gvqg\" (UID: \"0af4bab9-5198-4d2c-a811-17db77304d40\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323395-9gvqg" Oct 02 11:15:00 crc kubenswrapper[4783]: I1002 11:15:00.323516 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0af4bab9-5198-4d2c-a811-17db77304d40-config-volume\") pod \"collect-profiles-29323395-9gvqg\" (UID: \"0af4bab9-5198-4d2c-a811-17db77304d40\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323395-9gvqg" Oct 02 11:15:00 crc kubenswrapper[4783]: I1002 11:15:00.344079 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0af4bab9-5198-4d2c-a811-17db77304d40-secret-volume\") pod \"collect-profiles-29323395-9gvqg\" (UID: \"0af4bab9-5198-4d2c-a811-17db77304d40\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323395-9gvqg" Oct 02 11:15:00 crc kubenswrapper[4783]: I1002 11:15:00.347383 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xbjx7\" (UniqueName: \"kubernetes.io/projected/0af4bab9-5198-4d2c-a811-17db77304d40-kube-api-access-xbjx7\") pod \"collect-profiles-29323395-9gvqg\" (UID: \"0af4bab9-5198-4d2c-a811-17db77304d40\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323395-9gvqg" Oct 02 11:15:00 crc kubenswrapper[4783]: I1002 11:15:00.386495 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-76fcf4b695-zd229"] Oct 02 11:15:00 crc kubenswrapper[4783]: I1002 11:15:00.386972 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-76fcf4b695-zd229" podUID="ab6e0516-5a5b-45c3-9f57-f1181fd69bac" containerName="dnsmasq-dns" containerID="cri-o://99b9b24e483e972c6d5666b93e2b81580646ed22dc0c672dce56a348447713b1" gracePeriod=10 Oct 02 11:15:00 crc kubenswrapper[4783]: I1002 11:15:00.607362 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29323395-9gvqg" Oct 02 11:15:00 crc kubenswrapper[4783]: I1002 11:15:00.825431 4783 generic.go:334] "Generic (PLEG): container finished" podID="ab6e0516-5a5b-45c3-9f57-f1181fd69bac" containerID="99b9b24e483e972c6d5666b93e2b81580646ed22dc0c672dce56a348447713b1" exitCode=0 Oct 02 11:15:00 crc kubenswrapper[4783]: I1002 11:15:00.825479 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76fcf4b695-zd229" event={"ID":"ab6e0516-5a5b-45c3-9f57-f1181fd69bac","Type":"ContainerDied","Data":"99b9b24e483e972c6d5666b93e2b81580646ed22dc0c672dce56a348447713b1"} Oct 02 11:15:00 crc kubenswrapper[4783]: I1002 11:15:00.902282 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Oct 02 11:15:00 crc kubenswrapper[4783]: I1002 11:15:00.936908 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wbgzq\" (UniqueName: \"kubernetes.io/projected/6f36ac44-6c86-4de4-9401-bf07f8907c19-kube-api-access-wbgzq\") pod \"6f36ac44-6c86-4de4-9401-bf07f8907c19\" (UID: \"6f36ac44-6c86-4de4-9401-bf07f8907c19\") " Oct 02 11:15:00 crc kubenswrapper[4783]: I1002 11:15:00.936969 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6f36ac44-6c86-4de4-9401-bf07f8907c19-logs\") pod \"6f36ac44-6c86-4de4-9401-bf07f8907c19\" (UID: \"6f36ac44-6c86-4de4-9401-bf07f8907c19\") " Oct 02 11:15:00 crc kubenswrapper[4783]: I1002 11:15:00.936998 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6f36ac44-6c86-4de4-9401-bf07f8907c19-httpd-run\") pod \"6f36ac44-6c86-4de4-9401-bf07f8907c19\" (UID: \"6f36ac44-6c86-4de4-9401-bf07f8907c19\") " Oct 02 11:15:00 crc kubenswrapper[4783]: I1002 11:15:00.937051 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f36ac44-6c86-4de4-9401-bf07f8907c19-combined-ca-bundle\") pod \"6f36ac44-6c86-4de4-9401-bf07f8907c19\" (UID: \"6f36ac44-6c86-4de4-9401-bf07f8907c19\") " Oct 02 11:15:00 crc kubenswrapper[4783]: I1002 11:15:00.937082 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"6f36ac44-6c86-4de4-9401-bf07f8907c19\" (UID: \"6f36ac44-6c86-4de4-9401-bf07f8907c19\") " Oct 02 11:15:00 crc kubenswrapper[4783]: I1002 11:15:00.937139 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f36ac44-6c86-4de4-9401-bf07f8907c19-config-data\") pod \"6f36ac44-6c86-4de4-9401-bf07f8907c19\" (UID: \"6f36ac44-6c86-4de4-9401-bf07f8907c19\") " Oct 02 11:15:00 crc kubenswrapper[4783]: I1002 11:15:00.937274 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6f36ac44-6c86-4de4-9401-bf07f8907c19-scripts\") pod \"6f36ac44-6c86-4de4-9401-bf07f8907c19\" (UID: \"6f36ac44-6c86-4de4-9401-bf07f8907c19\") " Oct 02 11:15:00 crc kubenswrapper[4783]: I1002 11:15:00.941579 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6f36ac44-6c86-4de4-9401-bf07f8907c19-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "6f36ac44-6c86-4de4-9401-bf07f8907c19" 
(UID: "6f36ac44-6c86-4de4-9401-bf07f8907c19"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:15:00 crc kubenswrapper[4783]: I1002 11:15:00.942454 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6f36ac44-6c86-4de4-9401-bf07f8907c19-logs" (OuterVolumeSpecName: "logs") pod "6f36ac44-6c86-4de4-9401-bf07f8907c19" (UID: "6f36ac44-6c86-4de4-9401-bf07f8907c19"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:15:00 crc kubenswrapper[4783]: I1002 11:15:00.944571 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6f36ac44-6c86-4de4-9401-bf07f8907c19-scripts" (OuterVolumeSpecName: "scripts") pod "6f36ac44-6c86-4de4-9401-bf07f8907c19" (UID: "6f36ac44-6c86-4de4-9401-bf07f8907c19"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:15:00 crc kubenswrapper[4783]: I1002 11:15:00.947441 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6f36ac44-6c86-4de4-9401-bf07f8907c19-kube-api-access-wbgzq" (OuterVolumeSpecName: "kube-api-access-wbgzq") pod "6f36ac44-6c86-4de4-9401-bf07f8907c19" (UID: "6f36ac44-6c86-4de4-9401-bf07f8907c19"). InnerVolumeSpecName "kube-api-access-wbgzq". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:15:00 crc kubenswrapper[4783]: I1002 11:15:00.953316 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "glance") pod "6f36ac44-6c86-4de4-9401-bf07f8907c19" (UID: "6f36ac44-6c86-4de4-9401-bf07f8907c19"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:00.999681 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6f36ac44-6c86-4de4-9401-bf07f8907c19-config-data" (OuterVolumeSpecName: "config-data") pod "6f36ac44-6c86-4de4-9401-bf07f8907c19" (UID: "6f36ac44-6c86-4de4-9401-bf07f8907c19"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.000765 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6f36ac44-6c86-4de4-9401-bf07f8907c19-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6f36ac44-6c86-4de4-9401-bf07f8907c19" (UID: "6f36ac44-6c86-4de4-9401-bf07f8907c19"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.043831 4783 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f36ac44-6c86-4de4-9401-bf07f8907c19-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.043890 4783 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" " Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.043900 4783 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f36ac44-6c86-4de4-9401-bf07f8907c19-config-data\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.043908 4783 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6f36ac44-6c86-4de4-9401-bf07f8907c19-scripts\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.043919 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wbgzq\" (UniqueName: \"kubernetes.io/projected/6f36ac44-6c86-4de4-9401-bf07f8907c19-kube-api-access-wbgzq\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.043929 4783 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6f36ac44-6c86-4de4-9401-bf07f8907c19-logs\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.043938 4783 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6f36ac44-6c86-4de4-9401-bf07f8907c19-httpd-run\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.094979 4783 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.149788 4783 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.266387 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29323395-9gvqg"] Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.763074 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.834996 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"c3562137-37ec-46c4-a696-e4541b41c6a5","Type":"ContainerDied","Data":"37681c4c126a143047245c3dc923e0b80f59c52169e400ee6eedfc349cf916de"} Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.835055 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.835067 4783 scope.go:117] "RemoveContainer" containerID="6e14e9fd0cc04ff1444703373737289f16f7b2274a56ef54955e7b80ffe25ca2" Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.836894 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"6f36ac44-6c86-4de4-9401-bf07f8907c19","Type":"ContainerDied","Data":"3d518523afb8a573af45c326c477c25920fe914c3ce85de1b0d5b3a252c82f4e"} Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.837023 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.837861 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29323395-9gvqg" event={"ID":"0af4bab9-5198-4d2c-a811-17db77304d40","Type":"ContainerStarted","Data":"d59cec4c1e0a93ffa3c8412074ca32152c096f3fffb75431080711ce1f28451e"} Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.858084 4783 scope.go:117] "RemoveContainer" containerID="f0004ca112422eb0bcf4114ffd1bd3d9d727e5f691d9f91243ec8754a4cec0f9" Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.858706 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.865626 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.876553 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c3562137-37ec-46c4-a696-e4541b41c6a5-scripts\") pod \"c3562137-37ec-46c4-a696-e4541b41c6a5\" (UID: \"c3562137-37ec-46c4-a696-e4541b41c6a5\") " Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.876623 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c3562137-37ec-46c4-a696-e4541b41c6a5-httpd-run\") pod \"c3562137-37ec-46c4-a696-e4541b41c6a5\" (UID: \"c3562137-37ec-46c4-a696-e4541b41c6a5\") " Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.876686 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3562137-37ec-46c4-a696-e4541b41c6a5-config-data\") pod \"c3562137-37ec-46c4-a696-e4541b41c6a5\" (UID: \"c3562137-37ec-46c4-a696-e4541b41c6a5\") " Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.876789 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"c3562137-37ec-46c4-a696-e4541b41c6a5\" (UID: \"c3562137-37ec-46c4-a696-e4541b41c6a5\") " Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.876813 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c3562137-37ec-46c4-a696-e4541b41c6a5-logs\") pod \"c3562137-37ec-46c4-a696-e4541b41c6a5\" (UID: \"c3562137-37ec-46c4-a696-e4541b41c6a5\") " Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.876876 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3562137-37ec-46c4-a696-e4541b41c6a5-combined-ca-bundle\") pod 
\"c3562137-37ec-46c4-a696-e4541b41c6a5\" (UID: \"c3562137-37ec-46c4-a696-e4541b41c6a5\") " Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.876929 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bmdtw\" (UniqueName: \"kubernetes.io/projected/c3562137-37ec-46c4-a696-e4541b41c6a5-kube-api-access-bmdtw\") pod \"c3562137-37ec-46c4-a696-e4541b41c6a5\" (UID: \"c3562137-37ec-46c4-a696-e4541b41c6a5\") " Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.878376 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c3562137-37ec-46c4-a696-e4541b41c6a5-logs" (OuterVolumeSpecName: "logs") pod "c3562137-37ec-46c4-a696-e4541b41c6a5" (UID: "c3562137-37ec-46c4-a696-e4541b41c6a5"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.878713 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c3562137-37ec-46c4-a696-e4541b41c6a5-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "c3562137-37ec-46c4-a696-e4541b41c6a5" (UID: "c3562137-37ec-46c4-a696-e4541b41c6a5"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.903960 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Oct 02 11:15:01 crc kubenswrapper[4783]: E1002 11:15:01.904608 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c3562137-37ec-46c4-a696-e4541b41c6a5" containerName="glance-log" Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.904924 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="c3562137-37ec-46c4-a696-e4541b41c6a5" containerName="glance-log" Oct 02 11:15:01 crc kubenswrapper[4783]: E1002 11:15:01.905105 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f36ac44-6c86-4de4-9401-bf07f8907c19" containerName="glance-httpd" Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.905180 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f36ac44-6c86-4de4-9401-bf07f8907c19" containerName="glance-httpd" Oct 02 11:15:01 crc kubenswrapper[4783]: E1002 11:15:01.905254 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c3562137-37ec-46c4-a696-e4541b41c6a5" containerName="glance-httpd" Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.905315 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="c3562137-37ec-46c4-a696-e4541b41c6a5" containerName="glance-httpd" Oct 02 11:15:01 crc kubenswrapper[4783]: E1002 11:15:01.905378 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f36ac44-6c86-4de4-9401-bf07f8907c19" containerName="glance-log" Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.905660 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f36ac44-6c86-4de4-9401-bf07f8907c19" containerName="glance-log" Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.905658 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c3562137-37ec-46c4-a696-e4541b41c6a5-kube-api-access-bmdtw" (OuterVolumeSpecName: "kube-api-access-bmdtw") pod "c3562137-37ec-46c4-a696-e4541b41c6a5" (UID: "c3562137-37ec-46c4-a696-e4541b41c6a5"). InnerVolumeSpecName "kube-api-access-bmdtw". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.906046 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="c3562137-37ec-46c4-a696-e4541b41c6a5" containerName="glance-log" Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.906133 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="6f36ac44-6c86-4de4-9401-bf07f8907c19" containerName="glance-httpd" Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.906203 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="c3562137-37ec-46c4-a696-e4541b41c6a5" containerName="glance-httpd" Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.906292 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="6f36ac44-6c86-4de4-9401-bf07f8907c19" containerName="glance-log" Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.907539 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.908123 4783 scope.go:117] "RemoveContainer" containerID="fa98dd43740b83a4169798c047c27776ab8ccb1660625c2ae6bc87949100d6ec" Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.912563 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "glance") pod "c3562137-37ec-46c4-a696-e4541b41c6a5" (UID: "c3562137-37ec-46c4-a696-e4541b41c6a5"). InnerVolumeSpecName "local-storage09-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.915393 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.920158 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c3562137-37ec-46c4-a696-e4541b41c6a5-scripts" (OuterVolumeSpecName: "scripts") pod "c3562137-37ec-46c4-a696-e4541b41c6a5" (UID: "c3562137-37ec-46c4-a696-e4541b41c6a5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.920507 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.933210 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.936149 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c3562137-37ec-46c4-a696-e4541b41c6a5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c3562137-37ec-46c4-a696-e4541b41c6a5" (UID: "c3562137-37ec-46c4-a696-e4541b41c6a5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.962469 4783 scope.go:117] "RemoveContainer" containerID="e379f2a833ca812e2c028d2defe8b0ed8c990e0c515a9ec9fbc722ee6b6d5167" Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.975841 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c3562137-37ec-46c4-a696-e4541b41c6a5-config-data" (OuterVolumeSpecName: "config-data") pod "c3562137-37ec-46c4-a696-e4541b41c6a5" (UID: "c3562137-37ec-46c4-a696-e4541b41c6a5"). 
InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.979371 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cd779125-a75d-4045-a75c-e7ed1fc66d5a-scripts\") pod \"glance-default-external-api-0\" (UID: \"cd779125-a75d-4045-a75c-e7ed1fc66d5a\") " pod="openstack/glance-default-external-api-0" Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.979535 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd779125-a75d-4045-a75c-e7ed1fc66d5a-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"cd779125-a75d-4045-a75c-e7ed1fc66d5a\") " pod="openstack/glance-default-external-api-0" Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.979596 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cd779125-a75d-4045-a75c-e7ed1fc66d5a-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"cd779125-a75d-4045-a75c-e7ed1fc66d5a\") " pod="openstack/glance-default-external-api-0" Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.979653 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/cd779125-a75d-4045-a75c-e7ed1fc66d5a-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"cd779125-a75d-4045-a75c-e7ed1fc66d5a\") " pod="openstack/glance-default-external-api-0" Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.979696 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd779125-a75d-4045-a75c-e7ed1fc66d5a-config-data\") pod \"glance-default-external-api-0\" (UID: \"cd779125-a75d-4045-a75c-e7ed1fc66d5a\") " pod="openstack/glance-default-external-api-0" Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.979782 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rbpn4\" (UniqueName: \"kubernetes.io/projected/cd779125-a75d-4045-a75c-e7ed1fc66d5a-kube-api-access-rbpn4\") pod \"glance-default-external-api-0\" (UID: \"cd779125-a75d-4045-a75c-e7ed1fc66d5a\") " pod="openstack/glance-default-external-api-0" Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.979820 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cd779125-a75d-4045-a75c-e7ed1fc66d5a-logs\") pod \"glance-default-external-api-0\" (UID: \"cd779125-a75d-4045-a75c-e7ed1fc66d5a\") " pod="openstack/glance-default-external-api-0" Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.979907 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"cd779125-a75d-4045-a75c-e7ed1fc66d5a\") " pod="openstack/glance-default-external-api-0" Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.979980 4783 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3562137-37ec-46c4-a696-e4541b41c6a5-combined-ca-bundle\") on 
node \"crc\" DevicePath \"\"" Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.979995 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bmdtw\" (UniqueName: \"kubernetes.io/projected/c3562137-37ec-46c4-a696-e4541b41c6a5-kube-api-access-bmdtw\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.980008 4783 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c3562137-37ec-46c4-a696-e4541b41c6a5-scripts\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.980021 4783 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c3562137-37ec-46c4-a696-e4541b41c6a5-httpd-run\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.980033 4783 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3562137-37ec-46c4-a696-e4541b41c6a5-config-data\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.980059 4783 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" " Oct 02 11:15:01 crc kubenswrapper[4783]: I1002 11:15:01.980071 4783 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c3562137-37ec-46c4-a696-e4541b41c6a5-logs\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.011629 4783 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc" Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.081442 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rbpn4\" (UniqueName: \"kubernetes.io/projected/cd779125-a75d-4045-a75c-e7ed1fc66d5a-kube-api-access-rbpn4\") pod \"glance-default-external-api-0\" (UID: \"cd779125-a75d-4045-a75c-e7ed1fc66d5a\") " pod="openstack/glance-default-external-api-0" Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.081481 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cd779125-a75d-4045-a75c-e7ed1fc66d5a-logs\") pod \"glance-default-external-api-0\" (UID: \"cd779125-a75d-4045-a75c-e7ed1fc66d5a\") " pod="openstack/glance-default-external-api-0" Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.081517 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"cd779125-a75d-4045-a75c-e7ed1fc66d5a\") " pod="openstack/glance-default-external-api-0" Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.081546 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cd779125-a75d-4045-a75c-e7ed1fc66d5a-scripts\") pod \"glance-default-external-api-0\" (UID: \"cd779125-a75d-4045-a75c-e7ed1fc66d5a\") " pod="openstack/glance-default-external-api-0" Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.081586 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/cd779125-a75d-4045-a75c-e7ed1fc66d5a-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"cd779125-a75d-4045-a75c-e7ed1fc66d5a\") " pod="openstack/glance-default-external-api-0" Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.081612 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cd779125-a75d-4045-a75c-e7ed1fc66d5a-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"cd779125-a75d-4045-a75c-e7ed1fc66d5a\") " pod="openstack/glance-default-external-api-0" Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.081646 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/cd779125-a75d-4045-a75c-e7ed1fc66d5a-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"cd779125-a75d-4045-a75c-e7ed1fc66d5a\") " pod="openstack/glance-default-external-api-0" Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.081674 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd779125-a75d-4045-a75c-e7ed1fc66d5a-config-data\") pod \"glance-default-external-api-0\" (UID: \"cd779125-a75d-4045-a75c-e7ed1fc66d5a\") " pod="openstack/glance-default-external-api-0" Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.081731 4783 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.082171 4783 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"cd779125-a75d-4045-a75c-e7ed1fc66d5a\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/glance-default-external-api-0" Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.082489 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/cd779125-a75d-4045-a75c-e7ed1fc66d5a-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"cd779125-a75d-4045-a75c-e7ed1fc66d5a\") " pod="openstack/glance-default-external-api-0" Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.082578 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cd779125-a75d-4045-a75c-e7ed1fc66d5a-logs\") pod \"glance-default-external-api-0\" (UID: \"cd779125-a75d-4045-a75c-e7ed1fc66d5a\") " pod="openstack/glance-default-external-api-0" Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.085514 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cd779125-a75d-4045-a75c-e7ed1fc66d5a-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"cd779125-a75d-4045-a75c-e7ed1fc66d5a\") " pod="openstack/glance-default-external-api-0" Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.085792 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd779125-a75d-4045-a75c-e7ed1fc66d5a-config-data\") pod \"glance-default-external-api-0\" (UID: \"cd779125-a75d-4045-a75c-e7ed1fc66d5a\") " pod="openstack/glance-default-external-api-0" Oct 02 11:15:02 crc 
kubenswrapper[4783]: I1002 11:15:02.085814 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd779125-a75d-4045-a75c-e7ed1fc66d5a-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"cd779125-a75d-4045-a75c-e7ed1fc66d5a\") " pod="openstack/glance-default-external-api-0" Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.086139 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cd779125-a75d-4045-a75c-e7ed1fc66d5a-scripts\") pod \"glance-default-external-api-0\" (UID: \"cd779125-a75d-4045-a75c-e7ed1fc66d5a\") " pod="openstack/glance-default-external-api-0" Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.100633 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rbpn4\" (UniqueName: \"kubernetes.io/projected/cd779125-a75d-4045-a75c-e7ed1fc66d5a-kube-api-access-rbpn4\") pod \"glance-default-external-api-0\" (UID: \"cd779125-a75d-4045-a75c-e7ed1fc66d5a\") " pod="openstack/glance-default-external-api-0" Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.111052 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"cd779125-a75d-4045-a75c-e7ed1fc66d5a\") " pod="openstack/glance-default-external-api-0" Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.212939 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.227122 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.251877 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.254443 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.256103 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.259946 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.260143 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.270503 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.387443 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5-config-data\") pod \"glance-default-internal-api-0\" (UID: \"591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.387809 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.387844 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-internal-api-0\" (UID: \"591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.388001 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.388129 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5-scripts\") pod \"glance-default-internal-api-0\" (UID: \"591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.388183 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.388228 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z8f4w\" (UniqueName: \"kubernetes.io/projected/591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5-kube-api-access-z8f4w\") pod \"glance-default-internal-api-0\" (UID: \"591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.388263 4783 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5-logs\") pod \"glance-default-internal-api-0\" (UID: \"591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.489697 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5-config-data\") pod \"glance-default-internal-api-0\" (UID: \"591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.490120 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.490161 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-internal-api-0\" (UID: \"591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.490211 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.490259 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5-scripts\") pod \"glance-default-internal-api-0\" (UID: \"591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.490301 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.490345 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z8f4w\" (UniqueName: \"kubernetes.io/projected/591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5-kube-api-access-z8f4w\") pod \"glance-default-internal-api-0\" (UID: \"591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.490383 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5-logs\") pod \"glance-default-internal-api-0\" (UID: \"591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.491078 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5-logs\") pod \"glance-default-internal-api-0\" (UID: \"591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.491787 4783 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-internal-api-0\" (UID: \"591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/glance-default-internal-api-0" Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.493911 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.496438 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5-scripts\") pod \"glance-default-internal-api-0\" (UID: \"591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.512320 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.512778 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5-config-data\") pod \"glance-default-internal-api-0\" (UID: \"591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.518179 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.520153 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z8f4w\" (UniqueName: \"kubernetes.io/projected/591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5-kube-api-access-z8f4w\") pod \"glance-default-internal-api-0\" (UID: \"591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.574833 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-internal-api-0\" (UID: \"591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.674911 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.935938 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Oct 02 11:15:02 crc kubenswrapper[4783]: W1002 11:15:02.963671 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcd779125_a75d_4045_a75c_e7ed1fc66d5a.slice/crio-8ac7d27c846dfc67805ffbfd60dfe18285b3a7a6e05f1361f2fb4aeb0abcd092 WatchSource:0}: Error finding container 8ac7d27c846dfc67805ffbfd60dfe18285b3a7a6e05f1361f2fb4aeb0abcd092: Status 404 returned error can't find the container with id 8ac7d27c846dfc67805ffbfd60dfe18285b3a7a6e05f1361f2fb4aeb0abcd092 Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.980921 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-5fcdf587dd-wvthh" Oct 02 11:15:02 crc kubenswrapper[4783]: I1002 11:15:02.980966 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-5fcdf587dd-wvthh" Oct 02 11:15:03 crc kubenswrapper[4783]: I1002 11:15:03.113070 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-567b57d86d-gv6fq" Oct 02 11:15:03 crc kubenswrapper[4783]: I1002 11:15:03.113149 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-567b57d86d-gv6fq" Oct 02 11:15:03 crc kubenswrapper[4783]: I1002 11:15:03.115214 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-567b57d86d-gv6fq" podUID="48c11fb6-76f0-4028-a76f-6f67904bf3aa" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.144:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.144:8443: connect: connection refused" Oct 02 11:15:03 crc kubenswrapper[4783]: I1002 11:15:03.274454 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 02 11:15:03 crc kubenswrapper[4783]: W1002 11:15:03.276073 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod591ff3e8_f8cc_4ca4_81a4_5b1cfc19ceb5.slice/crio-7f3ea338794d35487f01da25139791360378f7c07ff57dbacad4d0e0ee911279 WatchSource:0}: Error finding container 7f3ea338794d35487f01da25139791360378f7c07ff57dbacad4d0e0ee911279: Status 404 returned error can't find the container with id 7f3ea338794d35487f01da25139791360378f7c07ff57dbacad4d0e0ee911279 Oct 02 11:15:03 crc kubenswrapper[4783]: I1002 11:15:03.601124 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6f36ac44-6c86-4de4-9401-bf07f8907c19" path="/var/lib/kubelet/pods/6f36ac44-6c86-4de4-9401-bf07f8907c19/volumes" Oct 02 11:15:03 crc kubenswrapper[4783]: I1002 11:15:03.602268 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c3562137-37ec-46c4-a696-e4541b41c6a5" path="/var/lib/kubelet/pods/c3562137-37ec-46c4-a696-e4541b41c6a5/volumes" Oct 02 11:15:03 crc kubenswrapper[4783]: I1002 11:15:03.866456 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5","Type":"ContainerStarted","Data":"7f3ea338794d35487f01da25139791360378f7c07ff57dbacad4d0e0ee911279"} Oct 02 11:15:03 crc kubenswrapper[4783]: I1002 11:15:03.867747 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/glance-default-external-api-0" event={"ID":"cd779125-a75d-4045-a75c-e7ed1fc66d5a","Type":"ContainerStarted","Data":"8ac7d27c846dfc67805ffbfd60dfe18285b3a7a6e05f1361f2fb4aeb0abcd092"} Oct 02 11:15:04 crc kubenswrapper[4783]: I1002 11:15:04.212671 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-76fcf4b695-zd229" podUID="ab6e0516-5a5b-45c3-9f57-f1181fd69bac" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.140:5353: connect: connection refused" Oct 02 11:15:04 crc kubenswrapper[4783]: I1002 11:15:04.879399 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5","Type":"ContainerStarted","Data":"e50236fc5cac20cd9b4cff71215510458ff498a2e3572cfcbeea785ab45803f8"} Oct 02 11:15:04 crc kubenswrapper[4783]: I1002 11:15:04.881601 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"cd779125-a75d-4045-a75c-e7ed1fc66d5a","Type":"ContainerStarted","Data":"4e6ac63eed2338754a176ea676e3a145a674a9566cbdddb5d20fd0d89861fee5"} Oct 02 11:15:05 crc kubenswrapper[4783]: I1002 11:15:05.616617 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-76fcf4b695-zd229" Oct 02 11:15:05 crc kubenswrapper[4783]: I1002 11:15:05.751626 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ab6e0516-5a5b-45c3-9f57-f1181fd69bac-dns-swift-storage-0\") pod \"ab6e0516-5a5b-45c3-9f57-f1181fd69bac\" (UID: \"ab6e0516-5a5b-45c3-9f57-f1181fd69bac\") " Oct 02 11:15:05 crc kubenswrapper[4783]: I1002 11:15:05.751673 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ab6e0516-5a5b-45c3-9f57-f1181fd69bac-ovsdbserver-nb\") pod \"ab6e0516-5a5b-45c3-9f57-f1181fd69bac\" (UID: \"ab6e0516-5a5b-45c3-9f57-f1181fd69bac\") " Oct 02 11:15:05 crc kubenswrapper[4783]: I1002 11:15:05.751788 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-st9j9\" (UniqueName: \"kubernetes.io/projected/ab6e0516-5a5b-45c3-9f57-f1181fd69bac-kube-api-access-st9j9\") pod \"ab6e0516-5a5b-45c3-9f57-f1181fd69bac\" (UID: \"ab6e0516-5a5b-45c3-9f57-f1181fd69bac\") " Oct 02 11:15:05 crc kubenswrapper[4783]: I1002 11:15:05.751812 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ab6e0516-5a5b-45c3-9f57-f1181fd69bac-ovsdbserver-sb\") pod \"ab6e0516-5a5b-45c3-9f57-f1181fd69bac\" (UID: \"ab6e0516-5a5b-45c3-9f57-f1181fd69bac\") " Oct 02 11:15:05 crc kubenswrapper[4783]: I1002 11:15:05.751909 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ab6e0516-5a5b-45c3-9f57-f1181fd69bac-config\") pod \"ab6e0516-5a5b-45c3-9f57-f1181fd69bac\" (UID: \"ab6e0516-5a5b-45c3-9f57-f1181fd69bac\") " Oct 02 11:15:05 crc kubenswrapper[4783]: I1002 11:15:05.751979 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ab6e0516-5a5b-45c3-9f57-f1181fd69bac-dns-svc\") pod \"ab6e0516-5a5b-45c3-9f57-f1181fd69bac\" (UID: \"ab6e0516-5a5b-45c3-9f57-f1181fd69bac\") " Oct 02 11:15:05 crc kubenswrapper[4783]: I1002 11:15:05.773306 4783 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ab6e0516-5a5b-45c3-9f57-f1181fd69bac-kube-api-access-st9j9" (OuterVolumeSpecName: "kube-api-access-st9j9") pod "ab6e0516-5a5b-45c3-9f57-f1181fd69bac" (UID: "ab6e0516-5a5b-45c3-9f57-f1181fd69bac"). InnerVolumeSpecName "kube-api-access-st9j9". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:15:05 crc kubenswrapper[4783]: I1002 11:15:05.858065 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-st9j9\" (UniqueName: \"kubernetes.io/projected/ab6e0516-5a5b-45c3-9f57-f1181fd69bac-kube-api-access-st9j9\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:05 crc kubenswrapper[4783]: I1002 11:15:05.860395 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ab6e0516-5a5b-45c3-9f57-f1181fd69bac-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "ab6e0516-5a5b-45c3-9f57-f1181fd69bac" (UID: "ab6e0516-5a5b-45c3-9f57-f1181fd69bac"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:15:05 crc kubenswrapper[4783]: I1002 11:15:05.860566 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ab6e0516-5a5b-45c3-9f57-f1181fd69bac-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ab6e0516-5a5b-45c3-9f57-f1181fd69bac" (UID: "ab6e0516-5a5b-45c3-9f57-f1181fd69bac"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:15:05 crc kubenswrapper[4783]: I1002 11:15:05.880400 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ab6e0516-5a5b-45c3-9f57-f1181fd69bac-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "ab6e0516-5a5b-45c3-9f57-f1181fd69bac" (UID: "ab6e0516-5a5b-45c3-9f57-f1181fd69bac"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:15:05 crc kubenswrapper[4783]: I1002 11:15:05.909612 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-76fcf4b695-zd229" event={"ID":"ab6e0516-5a5b-45c3-9f57-f1181fd69bac","Type":"ContainerDied","Data":"4c667ed5e1b4d8e45b9e0a393110b53926820953d7c1036fd41db7fd34c1a981"} Oct 02 11:15:05 crc kubenswrapper[4783]: I1002 11:15:05.909685 4783 scope.go:117] "RemoveContainer" containerID="99b9b24e483e972c6d5666b93e2b81580646ed22dc0c672dce56a348447713b1" Oct 02 11:15:05 crc kubenswrapper[4783]: I1002 11:15:05.909874 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-76fcf4b695-zd229" Oct 02 11:15:05 crc kubenswrapper[4783]: I1002 11:15:05.917991 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ab6e0516-5a5b-45c3-9f57-f1181fd69bac-config" (OuterVolumeSpecName: "config") pod "ab6e0516-5a5b-45c3-9f57-f1181fd69bac" (UID: "ab6e0516-5a5b-45c3-9f57-f1181fd69bac"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:15:05 crc kubenswrapper[4783]: I1002 11:15:05.927305 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"cd779125-a75d-4045-a75c-e7ed1fc66d5a","Type":"ContainerStarted","Data":"c96f3388bdb9fb30ee9e91bfef63294eff4d35265da91c49c40a99af49e4f40c"} Oct 02 11:15:05 crc kubenswrapper[4783]: I1002 11:15:05.927300 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ab6e0516-5a5b-45c3-9f57-f1181fd69bac-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "ab6e0516-5a5b-45c3-9f57-f1181fd69bac" (UID: "ab6e0516-5a5b-45c3-9f57-f1181fd69bac"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:15:05 crc kubenswrapper[4783]: I1002 11:15:05.950695 4783 generic.go:334] "Generic (PLEG): container finished" podID="0af4bab9-5198-4d2c-a811-17db77304d40" containerID="731599dcf1b27223bb7f1169e21a3d5cfa4d21a91da6ce99cd69266dfc646729" exitCode=0 Oct 02 11:15:05 crc kubenswrapper[4783]: I1002 11:15:05.950903 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29323395-9gvqg" event={"ID":"0af4bab9-5198-4d2c-a811-17db77304d40","Type":"ContainerDied","Data":"731599dcf1b27223bb7f1169e21a3d5cfa4d21a91da6ce99cd69266dfc646729"} Oct 02 11:15:05 crc kubenswrapper[4783]: I1002 11:15:05.954309 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=4.95428715 podStartE2EDuration="4.95428715s" podCreationTimestamp="2025-10-02 11:15:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:15:05.953526619 +0000 UTC m=+1339.269720880" watchObservedRunningTime="2025-10-02 11:15:05.95428715 +0000 UTC m=+1339.270481411" Oct 02 11:15:05 crc kubenswrapper[4783]: I1002 11:15:05.959584 4783 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ab6e0516-5a5b-45c3-9f57-f1181fd69bac-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:05 crc kubenswrapper[4783]: I1002 11:15:05.959619 4783 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ab6e0516-5a5b-45c3-9f57-f1181fd69bac-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:05 crc kubenswrapper[4783]: I1002 11:15:05.959690 4783 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ab6e0516-5a5b-45c3-9f57-f1181fd69bac-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:05 crc kubenswrapper[4783]: I1002 11:15:05.959732 4783 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ab6e0516-5a5b-45c3-9f57-f1181fd69bac-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:05 crc kubenswrapper[4783]: I1002 11:15:05.959741 4783 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ab6e0516-5a5b-45c3-9f57-f1181fd69bac-config\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:05 crc kubenswrapper[4783]: I1002 11:15:05.960513 4783 scope.go:117] "RemoveContainer" containerID="cf943614091729b07abe51e9632bbf7489fc0b8ded142b3fb65b60c4b0112d8c" Oct 02 11:15:05 crc kubenswrapper[4783]: I1002 11:15:05.990668 4783 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5","Type":"ContainerStarted","Data":"18ff904c3244a87925e50c6ef8c9e260a9662888608826189c0acc5790554d3e"} Oct 02 11:15:06 crc kubenswrapper[4783]: I1002 11:15:06.023601 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=4.023570521 podStartE2EDuration="4.023570521s" podCreationTimestamp="2025-10-02 11:15:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:15:06.021736661 +0000 UTC m=+1339.337930942" watchObservedRunningTime="2025-10-02 11:15:06.023570521 +0000 UTC m=+1339.339764772" Oct 02 11:15:06 crc kubenswrapper[4783]: I1002 11:15:06.249515 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-76fcf4b695-zd229"] Oct 02 11:15:06 crc kubenswrapper[4783]: I1002 11:15:06.258115 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-76fcf4b695-zd229"] Oct 02 11:15:07 crc kubenswrapper[4783]: I1002 11:15:07.004957 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-sqsnf" event={"ID":"9e4fb56d-2565-4383-a883-a0c1eae40cb4","Type":"ContainerStarted","Data":"6065cae19bc7b3d54b1ea3462555fa77433d6d20d53fc623121834b0b2926859"} Oct 02 11:15:07 crc kubenswrapper[4783]: I1002 11:15:07.015267 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"193c55b5-888f-4738-b8f6-c075d2b396a5","Type":"ContainerStarted","Data":"d2040146a56d5ca88572ce0ab8e2e1bf67062b0343213e2ed5a11e8a1e67349d"} Oct 02 11:15:07 crc kubenswrapper[4783]: I1002 11:15:07.036376 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-sqsnf" podStartSLOduration=3.089393475 podStartE2EDuration="1m4.036357577s" podCreationTimestamp="2025-10-02 11:14:03 +0000 UTC" firstStartedPulling="2025-10-02 11:14:04.821992068 +0000 UTC m=+1278.138186329" lastFinishedPulling="2025-10-02 11:15:05.76895617 +0000 UTC m=+1339.085150431" observedRunningTime="2025-10-02 11:15:07.029327365 +0000 UTC m=+1340.345521636" watchObservedRunningTime="2025-10-02 11:15:07.036357577 +0000 UTC m=+1340.352551838" Oct 02 11:15:07 crc kubenswrapper[4783]: I1002 11:15:07.389691 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29323395-9gvqg" Oct 02 11:15:07 crc kubenswrapper[4783]: I1002 11:15:07.486978 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xbjx7\" (UniqueName: \"kubernetes.io/projected/0af4bab9-5198-4d2c-a811-17db77304d40-kube-api-access-xbjx7\") pod \"0af4bab9-5198-4d2c-a811-17db77304d40\" (UID: \"0af4bab9-5198-4d2c-a811-17db77304d40\") " Oct 02 11:15:07 crc kubenswrapper[4783]: I1002 11:15:07.487359 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0af4bab9-5198-4d2c-a811-17db77304d40-config-volume\") pod \"0af4bab9-5198-4d2c-a811-17db77304d40\" (UID: \"0af4bab9-5198-4d2c-a811-17db77304d40\") " Oct 02 11:15:07 crc kubenswrapper[4783]: I1002 11:15:07.487386 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0af4bab9-5198-4d2c-a811-17db77304d40-secret-volume\") pod \"0af4bab9-5198-4d2c-a811-17db77304d40\" (UID: \"0af4bab9-5198-4d2c-a811-17db77304d40\") " Oct 02 11:15:07 crc kubenswrapper[4783]: I1002 11:15:07.488019 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0af4bab9-5198-4d2c-a811-17db77304d40-config-volume" (OuterVolumeSpecName: "config-volume") pod "0af4bab9-5198-4d2c-a811-17db77304d40" (UID: "0af4bab9-5198-4d2c-a811-17db77304d40"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:15:07 crc kubenswrapper[4783]: I1002 11:15:07.492320 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0af4bab9-5198-4d2c-a811-17db77304d40-kube-api-access-xbjx7" (OuterVolumeSpecName: "kube-api-access-xbjx7") pod "0af4bab9-5198-4d2c-a811-17db77304d40" (UID: "0af4bab9-5198-4d2c-a811-17db77304d40"). InnerVolumeSpecName "kube-api-access-xbjx7". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:15:07 crc kubenswrapper[4783]: I1002 11:15:07.492644 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0af4bab9-5198-4d2c-a811-17db77304d40-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "0af4bab9-5198-4d2c-a811-17db77304d40" (UID: "0af4bab9-5198-4d2c-a811-17db77304d40"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:15:07 crc kubenswrapper[4783]: I1002 11:15:07.565748 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ab6e0516-5a5b-45c3-9f57-f1181fd69bac" path="/var/lib/kubelet/pods/ab6e0516-5a5b-45c3-9f57-f1181fd69bac/volumes" Oct 02 11:15:07 crc kubenswrapper[4783]: I1002 11:15:07.589408 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xbjx7\" (UniqueName: \"kubernetes.io/projected/0af4bab9-5198-4d2c-a811-17db77304d40-kube-api-access-xbjx7\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:07 crc kubenswrapper[4783]: I1002 11:15:07.591222 4783 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0af4bab9-5198-4d2c-a811-17db77304d40-config-volume\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:07 crc kubenswrapper[4783]: I1002 11:15:07.591234 4783 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0af4bab9-5198-4d2c-a811-17db77304d40-secret-volume\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:08 crc kubenswrapper[4783]: I1002 11:15:08.025776 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29323395-9gvqg" event={"ID":"0af4bab9-5198-4d2c-a811-17db77304d40","Type":"ContainerDied","Data":"d59cec4c1e0a93ffa3c8412074ca32152c096f3fffb75431080711ce1f28451e"} Oct 02 11:15:08 crc kubenswrapper[4783]: I1002 11:15:08.025819 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d59cec4c1e0a93ffa3c8412074ca32152c096f3fffb75431080711ce1f28451e" Oct 02 11:15:08 crc kubenswrapper[4783]: I1002 11:15:08.025917 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29323395-9gvqg" Oct 02 11:15:12 crc kubenswrapper[4783]: I1002 11:15:12.252966 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Oct 02 11:15:12 crc kubenswrapper[4783]: I1002 11:15:12.253579 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Oct 02 11:15:12 crc kubenswrapper[4783]: I1002 11:15:12.304616 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Oct 02 11:15:12 crc kubenswrapper[4783]: I1002 11:15:12.318709 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Oct 02 11:15:12 crc kubenswrapper[4783]: I1002 11:15:12.675654 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Oct 02 11:15:12 crc kubenswrapper[4783]: I1002 11:15:12.676032 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Oct 02 11:15:12 crc kubenswrapper[4783]: I1002 11:15:12.719999 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Oct 02 11:15:12 crc kubenswrapper[4783]: I1002 11:15:12.730814 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Oct 02 11:15:12 crc kubenswrapper[4783]: I1002 11:15:12.983174 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-5fcdf587dd-wvthh" podUID="8abc2e1e-de94-4880-8a75-0c7ee0a2cdba" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.143:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.143:8443: connect: connection refused" Oct 02 11:15:13 crc kubenswrapper[4783]: I1002 11:15:13.072581 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Oct 02 11:15:13 crc kubenswrapper[4783]: I1002 11:15:13.072629 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Oct 02 11:15:13 crc kubenswrapper[4783]: I1002 11:15:13.072643 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Oct 02 11:15:13 crc kubenswrapper[4783]: I1002 11:15:13.072653 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Oct 02 11:15:13 crc kubenswrapper[4783]: I1002 11:15:13.114011 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-567b57d86d-gv6fq" podUID="48c11fb6-76f0-4028-a76f-6f67904bf3aa" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.144:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.144:8443: connect: connection refused" Oct 02 11:15:21 crc kubenswrapper[4783]: I1002 11:15:21.513812 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 02 11:15:21 crc kubenswrapper[4783]: I1002 11:15:21.514275 4783 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 02 11:15:21 crc kubenswrapper[4783]: I1002 11:15:21.514322 4783 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" Oct 02 11:15:21 crc kubenswrapper[4783]: I1002 11:15:21.514979 4783 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d68ca58a875615f7dd80b97789e236029261d5b31a2b176dd22b20723a10f851"} pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 02 11:15:21 crc kubenswrapper[4783]: I1002 11:15:21.515039 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" containerID="cri-o://d68ca58a875615f7dd80b97789e236029261d5b31a2b176dd22b20723a10f851" gracePeriod=600 Oct 02 11:15:22 crc kubenswrapper[4783]: I1002 11:15:22.157556 4783 generic.go:334] "Generic (PLEG): container finished" podID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerID="d68ca58a875615f7dd80b97789e236029261d5b31a2b176dd22b20723a10f851" exitCode=0 Oct 02 11:15:22 crc kubenswrapper[4783]: I1002 11:15:22.157943 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" event={"ID":"3288cc82-59a8-408e-8b0e-b5255882b4fb","Type":"ContainerDied","Data":"d68ca58a875615f7dd80b97789e236029261d5b31a2b176dd22b20723a10f851"} Oct 02 11:15:22 crc kubenswrapper[4783]: I1002 11:15:22.157995 4783 scope.go:117] "RemoveContainer" containerID="9eb9fd07e1e6e14820a34c4d9ea92acb1e0177338f9204e820b47da5ec49b7d3" Oct 02 11:15:22 crc kubenswrapper[4783]: I1002 11:15:22.981456 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-5fcdf587dd-wvthh" podUID="8abc2e1e-de94-4880-8a75-0c7ee0a2cdba" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.143:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.143:8443: connect: connection refused" Oct 02 11:15:23 crc kubenswrapper[4783]: I1002 11:15:23.114193 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-567b57d86d-gv6fq" podUID="48c11fb6-76f0-4028-a76f-6f67904bf3aa" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.144:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.144:8443: connect: connection refused" Oct 02 11:15:23 crc kubenswrapper[4783]: I1002 11:15:23.115076 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-567b57d86d-gv6fq" Oct 02 11:15:23 crc kubenswrapper[4783]: I1002 11:15:23.116083 4783 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="horizon" containerStatusID={"Type":"cri-o","ID":"58153738b4583103c9b6b2a35fefcf7fa1e4c224a6092baa570c4337ad49f3cb"} pod="openstack/horizon-567b57d86d-gv6fq" containerMessage="Container horizon failed startup probe, will be restarted" Oct 02 11:15:23 crc kubenswrapper[4783]: I1002 11:15:23.116137 4783 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/horizon-567b57d86d-gv6fq" podUID="48c11fb6-76f0-4028-a76f-6f67904bf3aa" containerName="horizon" containerID="cri-o://58153738b4583103c9b6b2a35fefcf7fa1e4c224a6092baa570c4337ad49f3cb" gracePeriod=30 Oct 02 11:15:24 crc kubenswrapper[4783]: I1002 11:15:24.177086 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" event={"ID":"3288cc82-59a8-408e-8b0e-b5255882b4fb","Type":"ContainerStarted","Data":"a4f4b50638a10c4b177b4afe187ab7d697078645bdbd553938b22e713769fb04"} Oct 02 11:15:24 crc kubenswrapper[4783]: I1002 11:15:24.179270 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"193c55b5-888f-4738-b8f6-c075d2b396a5","Type":"ContainerStarted","Data":"16c1d591eee518b57312cf8588b935b0a0a8206392abbcea70c9d9b8d902dcc6"} Oct 02 11:15:24 crc kubenswrapper[4783]: E1002 11:15:24.193355 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ceilometer-central-agent\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/ceilometer-0" podUID="193c55b5-888f-4738-b8f6-c075d2b396a5" Oct 02 11:15:25 crc kubenswrapper[4783]: I1002 11:15:25.188866 4783 generic.go:334] "Generic (PLEG): container finished" podID="2f856246-d4db-48e2-81ec-b756ceba0667" containerID="17134fa1c0bb54795ef7e71f2b048d8f0c6857876242e93c430e358361994184" exitCode=0 Oct 02 11:15:25 crc kubenswrapper[4783]: I1002 11:15:25.188941 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-bw8mq" event={"ID":"2f856246-d4db-48e2-81ec-b756ceba0667","Type":"ContainerDied","Data":"17134fa1c0bb54795ef7e71f2b048d8f0c6857876242e93c430e358361994184"} Oct 02 11:15:25 crc kubenswrapper[4783]: I1002 11:15:25.189316 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="193c55b5-888f-4738-b8f6-c075d2b396a5" containerName="ceilometer-notification-agent" containerID="cri-o://d6ca1a25e8a2b171e26d7e7cf65572d13be61eca7af3dba25dabbe62e8259bb6" gracePeriod=30 Oct 02 11:15:25 crc kubenswrapper[4783]: I1002 11:15:25.189363 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="193c55b5-888f-4738-b8f6-c075d2b396a5" containerName="proxy-httpd" containerID="cri-o://16c1d591eee518b57312cf8588b935b0a0a8206392abbcea70c9d9b8d902dcc6" gracePeriod=30 Oct 02 11:15:25 crc kubenswrapper[4783]: I1002 11:15:25.189385 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="193c55b5-888f-4738-b8f6-c075d2b396a5" containerName="sg-core" containerID="cri-o://d2040146a56d5ca88572ce0ab8e2e1bf67062b0343213e2ed5a11e8a1e67349d" gracePeriod=30 Oct 02 11:15:25 crc kubenswrapper[4783]: I1002 11:15:25.189632 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.199176 4783 generic.go:334] "Generic (PLEG): container finished" podID="193c55b5-888f-4738-b8f6-c075d2b396a5" containerID="16c1d591eee518b57312cf8588b935b0a0a8206392abbcea70c9d9b8d902dcc6" exitCode=0 Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.199664 4783 generic.go:334] "Generic (PLEG): container finished" podID="193c55b5-888f-4738-b8f6-c075d2b396a5" containerID="d2040146a56d5ca88572ce0ab8e2e1bf67062b0343213e2ed5a11e8a1e67349d" exitCode=2 Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.199673 4783 
generic.go:334] "Generic (PLEG): container finished" podID="193c55b5-888f-4738-b8f6-c075d2b396a5" containerID="d6ca1a25e8a2b171e26d7e7cf65572d13be61eca7af3dba25dabbe62e8259bb6" exitCode=0 Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.199219 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"193c55b5-888f-4738-b8f6-c075d2b396a5","Type":"ContainerDied","Data":"16c1d591eee518b57312cf8588b935b0a0a8206392abbcea70c9d9b8d902dcc6"} Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.201155 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"193c55b5-888f-4738-b8f6-c075d2b396a5","Type":"ContainerDied","Data":"d2040146a56d5ca88572ce0ab8e2e1bf67062b0343213e2ed5a11e8a1e67349d"} Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.201176 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"193c55b5-888f-4738-b8f6-c075d2b396a5","Type":"ContainerDied","Data":"d6ca1a25e8a2b171e26d7e7cf65572d13be61eca7af3dba25dabbe62e8259bb6"} Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.426932 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.539256 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hnhr6\" (UniqueName: \"kubernetes.io/projected/193c55b5-888f-4738-b8f6-c075d2b396a5-kube-api-access-hnhr6\") pod \"193c55b5-888f-4738-b8f6-c075d2b396a5\" (UID: \"193c55b5-888f-4738-b8f6-c075d2b396a5\") " Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.539360 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/193c55b5-888f-4738-b8f6-c075d2b396a5-combined-ca-bundle\") pod \"193c55b5-888f-4738-b8f6-c075d2b396a5\" (UID: \"193c55b5-888f-4738-b8f6-c075d2b396a5\") " Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.539381 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/193c55b5-888f-4738-b8f6-c075d2b396a5-config-data\") pod \"193c55b5-888f-4738-b8f6-c075d2b396a5\" (UID: \"193c55b5-888f-4738-b8f6-c075d2b396a5\") " Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.539426 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/193c55b5-888f-4738-b8f6-c075d2b396a5-run-httpd\") pod \"193c55b5-888f-4738-b8f6-c075d2b396a5\" (UID: \"193c55b5-888f-4738-b8f6-c075d2b396a5\") " Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.539478 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/193c55b5-888f-4738-b8f6-c075d2b396a5-log-httpd\") pod \"193c55b5-888f-4738-b8f6-c075d2b396a5\" (UID: \"193c55b5-888f-4738-b8f6-c075d2b396a5\") " Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.539555 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/193c55b5-888f-4738-b8f6-c075d2b396a5-scripts\") pod \"193c55b5-888f-4738-b8f6-c075d2b396a5\" (UID: \"193c55b5-888f-4738-b8f6-c075d2b396a5\") " Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.539586 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: 
\"kubernetes.io/secret/193c55b5-888f-4738-b8f6-c075d2b396a5-sg-core-conf-yaml\") pod \"193c55b5-888f-4738-b8f6-c075d2b396a5\" (UID: \"193c55b5-888f-4738-b8f6-c075d2b396a5\") " Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.541680 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/193c55b5-888f-4738-b8f6-c075d2b396a5-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "193c55b5-888f-4738-b8f6-c075d2b396a5" (UID: "193c55b5-888f-4738-b8f6-c075d2b396a5"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.541954 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/193c55b5-888f-4738-b8f6-c075d2b396a5-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "193c55b5-888f-4738-b8f6-c075d2b396a5" (UID: "193c55b5-888f-4738-b8f6-c075d2b396a5"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.545009 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/193c55b5-888f-4738-b8f6-c075d2b396a5-scripts" (OuterVolumeSpecName: "scripts") pod "193c55b5-888f-4738-b8f6-c075d2b396a5" (UID: "193c55b5-888f-4738-b8f6-c075d2b396a5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.547870 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/193c55b5-888f-4738-b8f6-c075d2b396a5-kube-api-access-hnhr6" (OuterVolumeSpecName: "kube-api-access-hnhr6") pod "193c55b5-888f-4738-b8f6-c075d2b396a5" (UID: "193c55b5-888f-4738-b8f6-c075d2b396a5"). InnerVolumeSpecName "kube-api-access-hnhr6". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.647397 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/193c55b5-888f-4738-b8f6-c075d2b396a5-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "193c55b5-888f-4738-b8f6-c075d2b396a5" (UID: "193c55b5-888f-4738-b8f6-c075d2b396a5"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.658864 4783 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/193c55b5-888f-4738-b8f6-c075d2b396a5-scripts\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.658895 4783 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/193c55b5-888f-4738-b8f6-c075d2b396a5-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.658904 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hnhr6\" (UniqueName: \"kubernetes.io/projected/193c55b5-888f-4738-b8f6-c075d2b396a5-kube-api-access-hnhr6\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.658927 4783 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/193c55b5-888f-4738-b8f6-c075d2b396a5-run-httpd\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.658940 4783 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/193c55b5-888f-4738-b8f6-c075d2b396a5-log-httpd\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.667521 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/193c55b5-888f-4738-b8f6-c075d2b396a5-config-data" (OuterVolumeSpecName: "config-data") pod "193c55b5-888f-4738-b8f6-c075d2b396a5" (UID: "193c55b5-888f-4738-b8f6-c075d2b396a5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.676546 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/193c55b5-888f-4738-b8f6-c075d2b396a5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "193c55b5-888f-4738-b8f6-c075d2b396a5" (UID: "193c55b5-888f-4738-b8f6-c075d2b396a5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.679914 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.680060 4783 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.680538 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.680606 4783 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.683042 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.683236 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.698456 4783 util.go:48] "No ready sandbox for pod can be found. 
Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.698456 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-bw8mq"
Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.761037 4783 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/193c55b5-888f-4738-b8f6-c075d2b396a5-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.761081 4783 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/193c55b5-888f-4738-b8f6-c075d2b396a5-config-data\") on node \"crc\" DevicePath \"\""
Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.862467 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/2f856246-d4db-48e2-81ec-b756ceba0667-credential-keys\") pod \"2f856246-d4db-48e2-81ec-b756ceba0667\" (UID: \"2f856246-d4db-48e2-81ec-b756ceba0667\") "
Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.862572 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n5t9b\" (UniqueName: \"kubernetes.io/projected/2f856246-d4db-48e2-81ec-b756ceba0667-kube-api-access-n5t9b\") pod \"2f856246-d4db-48e2-81ec-b756ceba0667\" (UID: \"2f856246-d4db-48e2-81ec-b756ceba0667\") "
Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.862636 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2f856246-d4db-48e2-81ec-b756ceba0667-config-data\") pod \"2f856246-d4db-48e2-81ec-b756ceba0667\" (UID: \"2f856246-d4db-48e2-81ec-b756ceba0667\") "
Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.862669 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2f856246-d4db-48e2-81ec-b756ceba0667-scripts\") pod \"2f856246-d4db-48e2-81ec-b756ceba0667\" (UID: \"2f856246-d4db-48e2-81ec-b756ceba0667\") "
Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.862684 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/2f856246-d4db-48e2-81ec-b756ceba0667-fernet-keys\") pod \"2f856246-d4db-48e2-81ec-b756ceba0667\" (UID: \"2f856246-d4db-48e2-81ec-b756ceba0667\") "
Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.862775 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2f856246-d4db-48e2-81ec-b756ceba0667-combined-ca-bundle\") pod \"2f856246-d4db-48e2-81ec-b756ceba0667\" (UID: \"2f856246-d4db-48e2-81ec-b756ceba0667\") "
Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.868051 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2f856246-d4db-48e2-81ec-b756ceba0667-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "2f856246-d4db-48e2-81ec-b756ceba0667" (UID: "2f856246-d4db-48e2-81ec-b756ceba0667"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.868063 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2f856246-d4db-48e2-81ec-b756ceba0667-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "2f856246-d4db-48e2-81ec-b756ceba0667" (UID: "2f856246-d4db-48e2-81ec-b756ceba0667"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.868348 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2f856246-d4db-48e2-81ec-b756ceba0667-kube-api-access-n5t9b" (OuterVolumeSpecName: "kube-api-access-n5t9b") pod "2f856246-d4db-48e2-81ec-b756ceba0667" (UID: "2f856246-d4db-48e2-81ec-b756ceba0667"). InnerVolumeSpecName "kube-api-access-n5t9b". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.870517 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2f856246-d4db-48e2-81ec-b756ceba0667-scripts" (OuterVolumeSpecName: "scripts") pod "2f856246-d4db-48e2-81ec-b756ceba0667" (UID: "2f856246-d4db-48e2-81ec-b756ceba0667"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.914534 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2f856246-d4db-48e2-81ec-b756ceba0667-config-data" (OuterVolumeSpecName: "config-data") pod "2f856246-d4db-48e2-81ec-b756ceba0667" (UID: "2f856246-d4db-48e2-81ec-b756ceba0667"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.914577 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2f856246-d4db-48e2-81ec-b756ceba0667-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2f856246-d4db-48e2-81ec-b756ceba0667" (UID: "2f856246-d4db-48e2-81ec-b756ceba0667"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.964595 4783 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2f856246-d4db-48e2-81ec-b756ceba0667-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.964633 4783 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/2f856246-d4db-48e2-81ec-b756ceba0667-credential-keys\") on node \"crc\" DevicePath \"\""
Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.964645 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n5t9b\" (UniqueName: \"kubernetes.io/projected/2f856246-d4db-48e2-81ec-b756ceba0667-kube-api-access-n5t9b\") on node \"crc\" DevicePath \"\""
Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.964656 4783 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2f856246-d4db-48e2-81ec-b756ceba0667-config-data\") on node \"crc\" DevicePath \"\""
Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.964664 4783 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2f856246-d4db-48e2-81ec-b756ceba0667-scripts\") on node \"crc\" DevicePath \"\""
Oct 02 11:15:26 crc kubenswrapper[4783]: I1002 11:15:26.964671 4783 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/2f856246-d4db-48e2-81ec-b756ceba0667-fernet-keys\") on node \"crc\" DevicePath \"\""
pod="openstack/keystone-bootstrap-bw8mq" event={"ID":"2f856246-d4db-48e2-81ec-b756ceba0667","Type":"ContainerDied","Data":"7a07a62448795fb0f1aef403c911e2ab754085d940f7cd6997a0196469f1293c"} Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.209128 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7a07a62448795fb0f1aef403c911e2ab754085d940f7cd6997a0196469f1293c" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.209212 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-bw8mq" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.218553 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.218595 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"193c55b5-888f-4738-b8f6-c075d2b396a5","Type":"ContainerDied","Data":"91e56996ca82437763b8805557846cda31c00af7c1199b0f5ea4eac443f36f0b"} Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.218630 4783 scope.go:117] "RemoveContainer" containerID="16c1d591eee518b57312cf8588b935b0a0a8206392abbcea70c9d9b8d902dcc6" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.256350 4783 scope.go:117] "RemoveContainer" containerID="d2040146a56d5ca88572ce0ab8e2e1bf67062b0343213e2ed5a11e8a1e67349d" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.274929 4783 scope.go:117] "RemoveContainer" containerID="d6ca1a25e8a2b171e26d7e7cf65572d13be61eca7af3dba25dabbe62e8259bb6" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.305243 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.314249 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.376070 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-6c6c7fffd4-bbhdp"] Oct 02 11:15:27 crc kubenswrapper[4783]: E1002 11:15:27.376544 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="193c55b5-888f-4738-b8f6-c075d2b396a5" containerName="proxy-httpd" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.376564 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="193c55b5-888f-4738-b8f6-c075d2b396a5" containerName="proxy-httpd" Oct 02 11:15:27 crc kubenswrapper[4783]: E1002 11:15:27.376584 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab6e0516-5a5b-45c3-9f57-f1181fd69bac" containerName="init" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.376592 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab6e0516-5a5b-45c3-9f57-f1181fd69bac" containerName="init" Oct 02 11:15:27 crc kubenswrapper[4783]: E1002 11:15:27.376605 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="193c55b5-888f-4738-b8f6-c075d2b396a5" containerName="ceilometer-notification-agent" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.376613 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="193c55b5-888f-4738-b8f6-c075d2b396a5" containerName="ceilometer-notification-agent" Oct 02 11:15:27 crc kubenswrapper[4783]: E1002 11:15:27.376635 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="193c55b5-888f-4738-b8f6-c075d2b396a5" containerName="sg-core" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.376642 4783 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="193c55b5-888f-4738-b8f6-c075d2b396a5" containerName="sg-core" Oct 02 11:15:27 crc kubenswrapper[4783]: E1002 11:15:27.376656 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f856246-d4db-48e2-81ec-b756ceba0667" containerName="keystone-bootstrap" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.376663 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f856246-d4db-48e2-81ec-b756ceba0667" containerName="keystone-bootstrap" Oct 02 11:15:27 crc kubenswrapper[4783]: E1002 11:15:27.376681 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0af4bab9-5198-4d2c-a811-17db77304d40" containerName="collect-profiles" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.376689 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="0af4bab9-5198-4d2c-a811-17db77304d40" containerName="collect-profiles" Oct 02 11:15:27 crc kubenswrapper[4783]: E1002 11:15:27.376706 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ab6e0516-5a5b-45c3-9f57-f1181fd69bac" containerName="dnsmasq-dns" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.376715 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="ab6e0516-5a5b-45c3-9f57-f1181fd69bac" containerName="dnsmasq-dns" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.376909 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="193c55b5-888f-4738-b8f6-c075d2b396a5" containerName="ceilometer-notification-agent" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.376925 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="193c55b5-888f-4738-b8f6-c075d2b396a5" containerName="sg-core" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.376942 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="2f856246-d4db-48e2-81ec-b756ceba0667" containerName="keystone-bootstrap" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.376952 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="0af4bab9-5198-4d2c-a811-17db77304d40" containerName="collect-profiles" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.376967 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="ab6e0516-5a5b-45c3-9f57-f1181fd69bac" containerName="dnsmasq-dns" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.376979 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="193c55b5-888f-4738-b8f6-c075d2b396a5" containerName="proxy-httpd" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.377689 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-6c6c7fffd4-bbhdp" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.382673 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.382880 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.383064 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.383180 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-wgqxk" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.383299 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.383761 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.404783 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.406732 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.409538 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.409783 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.425199 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.473341 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ftq4z\" (UniqueName: \"kubernetes.io/projected/b867ffde-a8a3-493e-b07e-f9c8320417ba-kube-api-access-ftq4z\") pod \"ceilometer-0\" (UID: \"b867ffde-a8a3-493e-b07e-f9c8320417ba\") " pod="openstack/ceilometer-0" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.473406 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2ead5b5c-0e3e-4479-9d0f-affe273fe41d-internal-tls-certs\") pod \"keystone-6c6c7fffd4-bbhdp\" (UID: \"2ead5b5c-0e3e-4479-9d0f-affe273fe41d\") " pod="openstack/keystone-6c6c7fffd4-bbhdp" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.473480 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/2ead5b5c-0e3e-4479-9d0f-affe273fe41d-credential-keys\") pod \"keystone-6c6c7fffd4-bbhdp\" (UID: \"2ead5b5c-0e3e-4479-9d0f-affe273fe41d\") " pod="openstack/keystone-6c6c7fffd4-bbhdp" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.473516 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2ead5b5c-0e3e-4479-9d0f-affe273fe41d-scripts\") pod \"keystone-6c6c7fffd4-bbhdp\" (UID: \"2ead5b5c-0e3e-4479-9d0f-affe273fe41d\") " pod="openstack/keystone-6c6c7fffd4-bbhdp" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.473595 4783 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ead5b5c-0e3e-4479-9d0f-affe273fe41d-combined-ca-bundle\") pod \"keystone-6c6c7fffd4-bbhdp\" (UID: \"2ead5b5c-0e3e-4479-9d0f-affe273fe41d\") " pod="openstack/keystone-6c6c7fffd4-bbhdp" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.473629 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b867ffde-a8a3-493e-b07e-f9c8320417ba-scripts\") pod \"ceilometer-0\" (UID: \"b867ffde-a8a3-493e-b07e-f9c8320417ba\") " pod="openstack/ceilometer-0" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.473715 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b867ffde-a8a3-493e-b07e-f9c8320417ba-log-httpd\") pod \"ceilometer-0\" (UID: \"b867ffde-a8a3-493e-b07e-f9c8320417ba\") " pod="openstack/ceilometer-0" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.473751 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2ead5b5c-0e3e-4479-9d0f-affe273fe41d-public-tls-certs\") pod \"keystone-6c6c7fffd4-bbhdp\" (UID: \"2ead5b5c-0e3e-4479-9d0f-affe273fe41d\") " pod="openstack/keystone-6c6c7fffd4-bbhdp" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.473793 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-th9mn\" (UniqueName: \"kubernetes.io/projected/2ead5b5c-0e3e-4479-9d0f-affe273fe41d-kube-api-access-th9mn\") pod \"keystone-6c6c7fffd4-bbhdp\" (UID: \"2ead5b5c-0e3e-4479-9d0f-affe273fe41d\") " pod="openstack/keystone-6c6c7fffd4-bbhdp" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.473820 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b867ffde-a8a3-493e-b07e-f9c8320417ba-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b867ffde-a8a3-493e-b07e-f9c8320417ba\") " pod="openstack/ceilometer-0" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.473874 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b867ffde-a8a3-493e-b07e-f9c8320417ba-run-httpd\") pod \"ceilometer-0\" (UID: \"b867ffde-a8a3-493e-b07e-f9c8320417ba\") " pod="openstack/ceilometer-0" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.473930 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/2ead5b5c-0e3e-4479-9d0f-affe273fe41d-fernet-keys\") pod \"keystone-6c6c7fffd4-bbhdp\" (UID: \"2ead5b5c-0e3e-4479-9d0f-affe273fe41d\") " pod="openstack/keystone-6c6c7fffd4-bbhdp" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.473972 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b867ffde-a8a3-493e-b07e-f9c8320417ba-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b867ffde-a8a3-493e-b07e-f9c8320417ba\") " pod="openstack/ceilometer-0" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.474000 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/b867ffde-a8a3-493e-b07e-f9c8320417ba-config-data\") pod \"ceilometer-0\" (UID: \"b867ffde-a8a3-493e-b07e-f9c8320417ba\") " pod="openstack/ceilometer-0" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.474033 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ead5b5c-0e3e-4479-9d0f-affe273fe41d-config-data\") pod \"keystone-6c6c7fffd4-bbhdp\" (UID: \"2ead5b5c-0e3e-4479-9d0f-affe273fe41d\") " pod="openstack/keystone-6c6c7fffd4-bbhdp" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.507219 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-6c6c7fffd4-bbhdp"] Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.558187 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="193c55b5-888f-4738-b8f6-c075d2b396a5" path="/var/lib/kubelet/pods/193c55b5-888f-4738-b8f6-c075d2b396a5/volumes" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.575699 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b867ffde-a8a3-493e-b07e-f9c8320417ba-log-httpd\") pod \"ceilometer-0\" (UID: \"b867ffde-a8a3-493e-b07e-f9c8320417ba\") " pod="openstack/ceilometer-0" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.576323 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/2ead5b5c-0e3e-4479-9d0f-affe273fe41d-public-tls-certs\") pod \"keystone-6c6c7fffd4-bbhdp\" (UID: \"2ead5b5c-0e3e-4479-9d0f-affe273fe41d\") " pod="openstack/keystone-6c6c7fffd4-bbhdp" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.577022 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-th9mn\" (UniqueName: \"kubernetes.io/projected/2ead5b5c-0e3e-4479-9d0f-affe273fe41d-kube-api-access-th9mn\") pod \"keystone-6c6c7fffd4-bbhdp\" (UID: \"2ead5b5c-0e3e-4479-9d0f-affe273fe41d\") " pod="openstack/keystone-6c6c7fffd4-bbhdp" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.577184 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b867ffde-a8a3-493e-b07e-f9c8320417ba-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b867ffde-a8a3-493e-b07e-f9c8320417ba\") " pod="openstack/ceilometer-0" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.577285 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b867ffde-a8a3-493e-b07e-f9c8320417ba-run-httpd\") pod \"ceilometer-0\" (UID: \"b867ffde-a8a3-493e-b07e-f9c8320417ba\") " pod="openstack/ceilometer-0" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.577435 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/2ead5b5c-0e3e-4479-9d0f-affe273fe41d-fernet-keys\") pod \"keystone-6c6c7fffd4-bbhdp\" (UID: \"2ead5b5c-0e3e-4479-9d0f-affe273fe41d\") " pod="openstack/keystone-6c6c7fffd4-bbhdp" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.577565 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b867ffde-a8a3-493e-b07e-f9c8320417ba-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b867ffde-a8a3-493e-b07e-f9c8320417ba\") " 
pod="openstack/ceilometer-0" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.577656 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b867ffde-a8a3-493e-b07e-f9c8320417ba-config-data\") pod \"ceilometer-0\" (UID: \"b867ffde-a8a3-493e-b07e-f9c8320417ba\") " pod="openstack/ceilometer-0" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.577741 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ead5b5c-0e3e-4479-9d0f-affe273fe41d-config-data\") pod \"keystone-6c6c7fffd4-bbhdp\" (UID: \"2ead5b5c-0e3e-4479-9d0f-affe273fe41d\") " pod="openstack/keystone-6c6c7fffd4-bbhdp" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.577835 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ftq4z\" (UniqueName: \"kubernetes.io/projected/b867ffde-a8a3-493e-b07e-f9c8320417ba-kube-api-access-ftq4z\") pod \"ceilometer-0\" (UID: \"b867ffde-a8a3-493e-b07e-f9c8320417ba\") " pod="openstack/ceilometer-0" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.577902 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2ead5b5c-0e3e-4479-9d0f-affe273fe41d-internal-tls-certs\") pod \"keystone-6c6c7fffd4-bbhdp\" (UID: \"2ead5b5c-0e3e-4479-9d0f-affe273fe41d\") " pod="openstack/keystone-6c6c7fffd4-bbhdp" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.578359 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/2ead5b5c-0e3e-4479-9d0f-affe273fe41d-credential-keys\") pod \"keystone-6c6c7fffd4-bbhdp\" (UID: \"2ead5b5c-0e3e-4479-9d0f-affe273fe41d\") " pod="openstack/keystone-6c6c7fffd4-bbhdp" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.578490 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2ead5b5c-0e3e-4479-9d0f-affe273fe41d-scripts\") pod \"keystone-6c6c7fffd4-bbhdp\" (UID: \"2ead5b5c-0e3e-4479-9d0f-affe273fe41d\") " pod="openstack/keystone-6c6c7fffd4-bbhdp" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.578604 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ead5b5c-0e3e-4479-9d0f-affe273fe41d-combined-ca-bundle\") pod \"keystone-6c6c7fffd4-bbhdp\" (UID: \"2ead5b5c-0e3e-4479-9d0f-affe273fe41d\") " pod="openstack/keystone-6c6c7fffd4-bbhdp" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.578699 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b867ffde-a8a3-493e-b07e-f9c8320417ba-scripts\") pod \"ceilometer-0\" (UID: \"b867ffde-a8a3-493e-b07e-f9c8320417ba\") " pod="openstack/ceilometer-0" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.583248 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/2ead5b5c-0e3e-4479-9d0f-affe273fe41d-scripts\") pod \"keystone-6c6c7fffd4-bbhdp\" (UID: \"2ead5b5c-0e3e-4479-9d0f-affe273fe41d\") " pod="openstack/keystone-6c6c7fffd4-bbhdp" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.576286 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/b867ffde-a8a3-493e-b07e-f9c8320417ba-log-httpd\") pod \"ceilometer-0\" (UID: \"b867ffde-a8a3-493e-b07e-f9c8320417ba\") " pod="openstack/ceilometer-0" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.590095 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b867ffde-a8a3-493e-b07e-f9c8320417ba-run-httpd\") pod \"ceilometer-0\" (UID: \"b867ffde-a8a3-493e-b07e-f9c8320417ba\") " pod="openstack/ceilometer-0" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.601583 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b867ffde-a8a3-493e-b07e-f9c8320417ba-config-data\") pod \"ceilometer-0\" (UID: \"b867ffde-a8a3-493e-b07e-f9c8320417ba\") " pod="openstack/ceilometer-0" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.602377 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b867ffde-a8a3-493e-b07e-f9c8320417ba-scripts\") pod \"ceilometer-0\" (UID: \"b867ffde-a8a3-493e-b07e-f9c8320417ba\") " pod="openstack/ceilometer-0" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.602636 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/2ead5b5c-0e3e-4479-9d0f-affe273fe41d-internal-tls-certs\") pod \"keystone-6c6c7fffd4-bbhdp\" (UID: \"2ead5b5c-0e3e-4479-9d0f-affe273fe41d\") " pod="openstack/keystone-6c6c7fffd4-bbhdp" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.608424 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/2ead5b5c-0e3e-4479-9d0f-affe273fe41d-config-data\") pod \"keystone-6c6c7fffd4-bbhdp\" (UID: \"2ead5b5c-0e3e-4479-9d0f-affe273fe41d\") " pod="openstack/keystone-6c6c7fffd4-bbhdp" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.608823 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/2ead5b5c-0e3e-4479-9d0f-affe273fe41d-credential-keys\") pod \"keystone-6c6c7fffd4-bbhdp\" (UID: \"2ead5b5c-0e3e-4479-9d0f-affe273fe41d\") " pod="openstack/keystone-6c6c7fffd4-bbhdp" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.612042 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/2ead5b5c-0e3e-4479-9d0f-affe273fe41d-fernet-keys\") pod \"keystone-6c6c7fffd4-bbhdp\" (UID: \"2ead5b5c-0e3e-4479-9d0f-affe273fe41d\") " pod="openstack/keystone-6c6c7fffd4-bbhdp" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.615639 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ftq4z\" (UniqueName: \"kubernetes.io/projected/b867ffde-a8a3-493e-b07e-f9c8320417ba-kube-api-access-ftq4z\") pod \"ceilometer-0\" (UID: \"b867ffde-a8a3-493e-b07e-f9c8320417ba\") " pod="openstack/ceilometer-0" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.617076 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b867ffde-a8a3-493e-b07e-f9c8320417ba-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"b867ffde-a8a3-493e-b07e-f9c8320417ba\") " pod="openstack/ceilometer-0" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.618917 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" 
(UniqueName: \"kubernetes.io/secret/2ead5b5c-0e3e-4479-9d0f-affe273fe41d-public-tls-certs\") pod \"keystone-6c6c7fffd4-bbhdp\" (UID: \"2ead5b5c-0e3e-4479-9d0f-affe273fe41d\") " pod="openstack/keystone-6c6c7fffd4-bbhdp" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.619159 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2ead5b5c-0e3e-4479-9d0f-affe273fe41d-combined-ca-bundle\") pod \"keystone-6c6c7fffd4-bbhdp\" (UID: \"2ead5b5c-0e3e-4479-9d0f-affe273fe41d\") " pod="openstack/keystone-6c6c7fffd4-bbhdp" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.619602 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b867ffde-a8a3-493e-b07e-f9c8320417ba-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"b867ffde-a8a3-493e-b07e-f9c8320417ba\") " pod="openstack/ceilometer-0" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.621957 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-th9mn\" (UniqueName: \"kubernetes.io/projected/2ead5b5c-0e3e-4479-9d0f-affe273fe41d-kube-api-access-th9mn\") pod \"keystone-6c6c7fffd4-bbhdp\" (UID: \"2ead5b5c-0e3e-4479-9d0f-affe273fe41d\") " pod="openstack/keystone-6c6c7fffd4-bbhdp" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.732941 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-6c6c7fffd4-bbhdp" Oct 02 11:15:27 crc kubenswrapper[4783]: I1002 11:15:27.749258 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 02 11:15:28 crc kubenswrapper[4783]: I1002 11:15:28.346563 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-6c6c7fffd4-bbhdp"] Oct 02 11:15:28 crc kubenswrapper[4783]: I1002 11:15:28.489521 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 02 11:15:29 crc kubenswrapper[4783]: I1002 11:15:29.269233 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b867ffde-a8a3-493e-b07e-f9c8320417ba","Type":"ContainerStarted","Data":"ca63c1a8d935112ad8441ba3f78fb1f2e77ef839da542b263bbf13fc6abeeea0"} Oct 02 11:15:29 crc kubenswrapper[4783]: I1002 11:15:29.269874 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b867ffde-a8a3-493e-b07e-f9c8320417ba","Type":"ContainerStarted","Data":"366c70a0dda543d8e6d5cf517d0bdd49034748aac82c143f6b934ac6d27c9927"} Oct 02 11:15:29 crc kubenswrapper[4783]: I1002 11:15:29.273518 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-6c6c7fffd4-bbhdp" event={"ID":"2ead5b5c-0e3e-4479-9d0f-affe273fe41d","Type":"ContainerStarted","Data":"196db9a0ee987c0b941bc59d559af1d14d2228c2d81ca58a3882fb1afd2dae90"} Oct 02 11:15:29 crc kubenswrapper[4783]: I1002 11:15:29.273569 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-6c6c7fffd4-bbhdp" event={"ID":"2ead5b5c-0e3e-4479-9d0f-affe273fe41d","Type":"ContainerStarted","Data":"c8eea838a109912cc727cd99ce4f130420b696af8b31dac311f81c058ceccaa2"} Oct 02 11:15:29 crc kubenswrapper[4783]: I1002 11:15:29.273592 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-6c6c7fffd4-bbhdp" Oct 02 11:15:29 crc kubenswrapper[4783]: I1002 11:15:29.276666 4783 generic.go:334] "Generic (PLEG): container finished" 
podID="6c42c3ba-c130-4b8d-940a-9aa134629554" containerID="d0393a0ce247d27be012699cdac67d87ae42e593c65a68d13ea245fd10b74bc7" exitCode=0 Oct 02 11:15:29 crc kubenswrapper[4783]: I1002 11:15:29.276715 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-l9zch" event={"ID":"6c42c3ba-c130-4b8d-940a-9aa134629554","Type":"ContainerDied","Data":"d0393a0ce247d27be012699cdac67d87ae42e593c65a68d13ea245fd10b74bc7"} Oct 02 11:15:29 crc kubenswrapper[4783]: I1002 11:15:29.318658 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-6c6c7fffd4-bbhdp" podStartSLOduration=2.318638562 podStartE2EDuration="2.318638562s" podCreationTimestamp="2025-10-02 11:15:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:15:29.29436667 +0000 UTC m=+1362.610560931" watchObservedRunningTime="2025-10-02 11:15:29.318638562 +0000 UTC m=+1362.634832823" Oct 02 11:15:30 crc kubenswrapper[4783]: I1002 11:15:30.629952 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-l9zch" Oct 02 11:15:30 crc kubenswrapper[4783]: I1002 11:15:30.750484 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c42c3ba-c130-4b8d-940a-9aa134629554-combined-ca-bundle\") pod \"6c42c3ba-c130-4b8d-940a-9aa134629554\" (UID: \"6c42c3ba-c130-4b8d-940a-9aa134629554\") " Oct 02 11:15:30 crc kubenswrapper[4783]: I1002 11:15:30.750874 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6fn5j\" (UniqueName: \"kubernetes.io/projected/6c42c3ba-c130-4b8d-940a-9aa134629554-kube-api-access-6fn5j\") pod \"6c42c3ba-c130-4b8d-940a-9aa134629554\" (UID: \"6c42c3ba-c130-4b8d-940a-9aa134629554\") " Oct 02 11:15:30 crc kubenswrapper[4783]: I1002 11:15:30.750952 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c42c3ba-c130-4b8d-940a-9aa134629554-config-data\") pod \"6c42c3ba-c130-4b8d-940a-9aa134629554\" (UID: \"6c42c3ba-c130-4b8d-940a-9aa134629554\") " Oct 02 11:15:30 crc kubenswrapper[4783]: I1002 11:15:30.751010 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c42c3ba-c130-4b8d-940a-9aa134629554-logs\") pod \"6c42c3ba-c130-4b8d-940a-9aa134629554\" (UID: \"6c42c3ba-c130-4b8d-940a-9aa134629554\") " Oct 02 11:15:30 crc kubenswrapper[4783]: I1002 11:15:30.751088 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c42c3ba-c130-4b8d-940a-9aa134629554-scripts\") pod \"6c42c3ba-c130-4b8d-940a-9aa134629554\" (UID: \"6c42c3ba-c130-4b8d-940a-9aa134629554\") " Oct 02 11:15:30 crc kubenswrapper[4783]: I1002 11:15:30.751575 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6c42c3ba-c130-4b8d-940a-9aa134629554-logs" (OuterVolumeSpecName: "logs") pod "6c42c3ba-c130-4b8d-940a-9aa134629554" (UID: "6c42c3ba-c130-4b8d-940a-9aa134629554"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:15:30 crc kubenswrapper[4783]: I1002 11:15:30.755144 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c42c3ba-c130-4b8d-940a-9aa134629554-kube-api-access-6fn5j" (OuterVolumeSpecName: "kube-api-access-6fn5j") pod "6c42c3ba-c130-4b8d-940a-9aa134629554" (UID: "6c42c3ba-c130-4b8d-940a-9aa134629554"). InnerVolumeSpecName "kube-api-access-6fn5j". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:15:30 crc kubenswrapper[4783]: I1002 11:15:30.763694 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c42c3ba-c130-4b8d-940a-9aa134629554-scripts" (OuterVolumeSpecName: "scripts") pod "6c42c3ba-c130-4b8d-940a-9aa134629554" (UID: "6c42c3ba-c130-4b8d-940a-9aa134629554"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:15:30 crc kubenswrapper[4783]: E1002 11:15:30.776726 4783 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/6c42c3ba-c130-4b8d-940a-9aa134629554-combined-ca-bundle podName:6c42c3ba-c130-4b8d-940a-9aa134629554 nodeName:}" failed. No retries permitted until 2025-10-02 11:15:31.276697379 +0000 UTC m=+1364.592891640 (durationBeforeRetry 500ms). Error: error cleaning subPath mounts for volume "combined-ca-bundle" (UniqueName: "kubernetes.io/secret/6c42c3ba-c130-4b8d-940a-9aa134629554-combined-ca-bundle") pod "6c42c3ba-c130-4b8d-940a-9aa134629554" (UID: "6c42c3ba-c130-4b8d-940a-9aa134629554") : error deleting /var/lib/kubelet/pods/6c42c3ba-c130-4b8d-940a-9aa134629554/volume-subpaths: remove /var/lib/kubelet/pods/6c42c3ba-c130-4b8d-940a-9aa134629554/volume-subpaths: no such file or directory Oct 02 11:15:30 crc kubenswrapper[4783]: I1002 11:15:30.780352 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c42c3ba-c130-4b8d-940a-9aa134629554-config-data" (OuterVolumeSpecName: "config-data") pod "6c42c3ba-c130-4b8d-940a-9aa134629554" (UID: "6c42c3ba-c130-4b8d-940a-9aa134629554"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:15:30 crc kubenswrapper[4783]: I1002 11:15:30.852873 4783 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c42c3ba-c130-4b8d-940a-9aa134629554-config-data\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:30 crc kubenswrapper[4783]: I1002 11:15:30.852912 4783 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c42c3ba-c130-4b8d-940a-9aa134629554-logs\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:30 crc kubenswrapper[4783]: I1002 11:15:30.852923 4783 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c42c3ba-c130-4b8d-940a-9aa134629554-scripts\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:30 crc kubenswrapper[4783]: I1002 11:15:30.852933 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6fn5j\" (UniqueName: \"kubernetes.io/projected/6c42c3ba-c130-4b8d-940a-9aa134629554-kube-api-access-6fn5j\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:31 crc kubenswrapper[4783]: I1002 11:15:31.296750 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-l9zch" Oct 02 11:15:31 crc kubenswrapper[4783]: I1002 11:15:31.296755 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-l9zch" event={"ID":"6c42c3ba-c130-4b8d-940a-9aa134629554","Type":"ContainerDied","Data":"ef08a280052523ad2bdcf6eaf8420f429605f264aca3f5922e59ebc4573e9c6b"} Oct 02 11:15:31 crc kubenswrapper[4783]: I1002 11:15:31.297116 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ef08a280052523ad2bdcf6eaf8420f429605f264aca3f5922e59ebc4573e9c6b" Oct 02 11:15:31 crc kubenswrapper[4783]: I1002 11:15:31.299650 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b867ffde-a8a3-493e-b07e-f9c8320417ba","Type":"ContainerStarted","Data":"495f2a3bbeb51a2655dad522f20f1f5fa9d89f13a2b7642ad3e5791e05d1cdd4"} Oct 02 11:15:31 crc kubenswrapper[4783]: I1002 11:15:31.360121 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c42c3ba-c130-4b8d-940a-9aa134629554-combined-ca-bundle\") pod \"6c42c3ba-c130-4b8d-940a-9aa134629554\" (UID: \"6c42c3ba-c130-4b8d-940a-9aa134629554\") " Oct 02 11:15:31 crc kubenswrapper[4783]: I1002 11:15:31.364269 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c42c3ba-c130-4b8d-940a-9aa134629554-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6c42c3ba-c130-4b8d-940a-9aa134629554" (UID: "6c42c3ba-c130-4b8d-940a-9aa134629554"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:15:31 crc kubenswrapper[4783]: I1002 11:15:31.413068 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-6dd865cb48-8hqrj"] Oct 02 11:15:31 crc kubenswrapper[4783]: E1002 11:15:31.413503 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c42c3ba-c130-4b8d-940a-9aa134629554" containerName="placement-db-sync" Oct 02 11:15:31 crc kubenswrapper[4783]: I1002 11:15:31.413527 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c42c3ba-c130-4b8d-940a-9aa134629554" containerName="placement-db-sync" Oct 02 11:15:31 crc kubenswrapper[4783]: I1002 11:15:31.413737 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c42c3ba-c130-4b8d-940a-9aa134629554" containerName="placement-db-sync" Oct 02 11:15:31 crc kubenswrapper[4783]: I1002 11:15:31.414896 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-6dd865cb48-8hqrj" Oct 02 11:15:31 crc kubenswrapper[4783]: I1002 11:15:31.420221 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Oct 02 11:15:31 crc kubenswrapper[4783]: I1002 11:15:31.420300 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Oct 02 11:15:31 crc kubenswrapper[4783]: I1002 11:15:31.443601 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-6dd865cb48-8hqrj"] Oct 02 11:15:31 crc kubenswrapper[4783]: I1002 11:15:31.465934 4783 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c42c3ba-c130-4b8d-940a-9aa134629554-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:31 crc kubenswrapper[4783]: I1002 11:15:31.571167 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q5scd\" (UniqueName: \"kubernetes.io/projected/83c170e0-d600-413c-b4a9-dbe838f2bcd2-kube-api-access-q5scd\") pod \"placement-6dd865cb48-8hqrj\" (UID: \"83c170e0-d600-413c-b4a9-dbe838f2bcd2\") " pod="openstack/placement-6dd865cb48-8hqrj" Oct 02 11:15:31 crc kubenswrapper[4783]: I1002 11:15:31.571250 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83c170e0-d600-413c-b4a9-dbe838f2bcd2-config-data\") pod \"placement-6dd865cb48-8hqrj\" (UID: \"83c170e0-d600-413c-b4a9-dbe838f2bcd2\") " pod="openstack/placement-6dd865cb48-8hqrj" Oct 02 11:15:31 crc kubenswrapper[4783]: I1002 11:15:31.571296 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83c170e0-d600-413c-b4a9-dbe838f2bcd2-combined-ca-bundle\") pod \"placement-6dd865cb48-8hqrj\" (UID: \"83c170e0-d600-413c-b4a9-dbe838f2bcd2\") " pod="openstack/placement-6dd865cb48-8hqrj" Oct 02 11:15:31 crc kubenswrapper[4783]: I1002 11:15:31.571378 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/83c170e0-d600-413c-b4a9-dbe838f2bcd2-scripts\") pod \"placement-6dd865cb48-8hqrj\" (UID: \"83c170e0-d600-413c-b4a9-dbe838f2bcd2\") " pod="openstack/placement-6dd865cb48-8hqrj" Oct 02 11:15:31 crc kubenswrapper[4783]: I1002 11:15:31.571459 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/83c170e0-d600-413c-b4a9-dbe838f2bcd2-internal-tls-certs\") pod \"placement-6dd865cb48-8hqrj\" (UID: \"83c170e0-d600-413c-b4a9-dbe838f2bcd2\") " pod="openstack/placement-6dd865cb48-8hqrj" Oct 02 11:15:31 crc kubenswrapper[4783]: I1002 11:15:31.571516 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/83c170e0-d600-413c-b4a9-dbe838f2bcd2-logs\") pod \"placement-6dd865cb48-8hqrj\" (UID: \"83c170e0-d600-413c-b4a9-dbe838f2bcd2\") " pod="openstack/placement-6dd865cb48-8hqrj" Oct 02 11:15:31 crc kubenswrapper[4783]: I1002 11:15:31.571590 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/83c170e0-d600-413c-b4a9-dbe838f2bcd2-public-tls-certs\") pod 
\"placement-6dd865cb48-8hqrj\" (UID: \"83c170e0-d600-413c-b4a9-dbe838f2bcd2\") " pod="openstack/placement-6dd865cb48-8hqrj" Oct 02 11:15:31 crc kubenswrapper[4783]: I1002 11:15:31.673181 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/83c170e0-d600-413c-b4a9-dbe838f2bcd2-scripts\") pod \"placement-6dd865cb48-8hqrj\" (UID: \"83c170e0-d600-413c-b4a9-dbe838f2bcd2\") " pod="openstack/placement-6dd865cb48-8hqrj" Oct 02 11:15:31 crc kubenswrapper[4783]: I1002 11:15:31.673250 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/83c170e0-d600-413c-b4a9-dbe838f2bcd2-internal-tls-certs\") pod \"placement-6dd865cb48-8hqrj\" (UID: \"83c170e0-d600-413c-b4a9-dbe838f2bcd2\") " pod="openstack/placement-6dd865cb48-8hqrj" Oct 02 11:15:31 crc kubenswrapper[4783]: I1002 11:15:31.673320 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/83c170e0-d600-413c-b4a9-dbe838f2bcd2-logs\") pod \"placement-6dd865cb48-8hqrj\" (UID: \"83c170e0-d600-413c-b4a9-dbe838f2bcd2\") " pod="openstack/placement-6dd865cb48-8hqrj" Oct 02 11:15:31 crc kubenswrapper[4783]: I1002 11:15:31.673808 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/83c170e0-d600-413c-b4a9-dbe838f2bcd2-logs\") pod \"placement-6dd865cb48-8hqrj\" (UID: \"83c170e0-d600-413c-b4a9-dbe838f2bcd2\") " pod="openstack/placement-6dd865cb48-8hqrj" Oct 02 11:15:31 crc kubenswrapper[4783]: I1002 11:15:31.674062 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/83c170e0-d600-413c-b4a9-dbe838f2bcd2-public-tls-certs\") pod \"placement-6dd865cb48-8hqrj\" (UID: \"83c170e0-d600-413c-b4a9-dbe838f2bcd2\") " pod="openstack/placement-6dd865cb48-8hqrj" Oct 02 11:15:31 crc kubenswrapper[4783]: I1002 11:15:31.674193 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q5scd\" (UniqueName: \"kubernetes.io/projected/83c170e0-d600-413c-b4a9-dbe838f2bcd2-kube-api-access-q5scd\") pod \"placement-6dd865cb48-8hqrj\" (UID: \"83c170e0-d600-413c-b4a9-dbe838f2bcd2\") " pod="openstack/placement-6dd865cb48-8hqrj" Oct 02 11:15:31 crc kubenswrapper[4783]: I1002 11:15:31.674243 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83c170e0-d600-413c-b4a9-dbe838f2bcd2-config-data\") pod \"placement-6dd865cb48-8hqrj\" (UID: \"83c170e0-d600-413c-b4a9-dbe838f2bcd2\") " pod="openstack/placement-6dd865cb48-8hqrj" Oct 02 11:15:31 crc kubenswrapper[4783]: I1002 11:15:31.674269 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83c170e0-d600-413c-b4a9-dbe838f2bcd2-combined-ca-bundle\") pod \"placement-6dd865cb48-8hqrj\" (UID: \"83c170e0-d600-413c-b4a9-dbe838f2bcd2\") " pod="openstack/placement-6dd865cb48-8hqrj" Oct 02 11:15:31 crc kubenswrapper[4783]: I1002 11:15:31.678114 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/83c170e0-d600-413c-b4a9-dbe838f2bcd2-internal-tls-certs\") pod \"placement-6dd865cb48-8hqrj\" (UID: \"83c170e0-d600-413c-b4a9-dbe838f2bcd2\") " pod="openstack/placement-6dd865cb48-8hqrj" Oct 02 11:15:31 
crc kubenswrapper[4783]: I1002 11:15:31.682632 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/83c170e0-d600-413c-b4a9-dbe838f2bcd2-scripts\") pod \"placement-6dd865cb48-8hqrj\" (UID: \"83c170e0-d600-413c-b4a9-dbe838f2bcd2\") " pod="openstack/placement-6dd865cb48-8hqrj" Oct 02 11:15:31 crc kubenswrapper[4783]: I1002 11:15:31.682992 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/83c170e0-d600-413c-b4a9-dbe838f2bcd2-combined-ca-bundle\") pod \"placement-6dd865cb48-8hqrj\" (UID: \"83c170e0-d600-413c-b4a9-dbe838f2bcd2\") " pod="openstack/placement-6dd865cb48-8hqrj" Oct 02 11:15:31 crc kubenswrapper[4783]: I1002 11:15:31.685260 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/83c170e0-d600-413c-b4a9-dbe838f2bcd2-public-tls-certs\") pod \"placement-6dd865cb48-8hqrj\" (UID: \"83c170e0-d600-413c-b4a9-dbe838f2bcd2\") " pod="openstack/placement-6dd865cb48-8hqrj" Oct 02 11:15:31 crc kubenswrapper[4783]: I1002 11:15:31.696892 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/83c170e0-d600-413c-b4a9-dbe838f2bcd2-config-data\") pod \"placement-6dd865cb48-8hqrj\" (UID: \"83c170e0-d600-413c-b4a9-dbe838f2bcd2\") " pod="openstack/placement-6dd865cb48-8hqrj" Oct 02 11:15:31 crc kubenswrapper[4783]: I1002 11:15:31.708775 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q5scd\" (UniqueName: \"kubernetes.io/projected/83c170e0-d600-413c-b4a9-dbe838f2bcd2-kube-api-access-q5scd\") pod \"placement-6dd865cb48-8hqrj\" (UID: \"83c170e0-d600-413c-b4a9-dbe838f2bcd2\") " pod="openstack/placement-6dd865cb48-8hqrj" Oct 02 11:15:31 crc kubenswrapper[4783]: I1002 11:15:31.743971 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-6dd865cb48-8hqrj" Oct 02 11:15:32 crc kubenswrapper[4783]: I1002 11:15:32.266543 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-6dd865cb48-8hqrj"] Oct 02 11:15:32 crc kubenswrapper[4783]: W1002 11:15:32.293595 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod83c170e0_d600_413c_b4a9_dbe838f2bcd2.slice/crio-d905e57bee15e73c7e99fac02d6f8088c31aa6826dd1e01aad690ada830ea93a WatchSource:0}: Error finding container d905e57bee15e73c7e99fac02d6f8088c31aa6826dd1e01aad690ada830ea93a: Status 404 returned error can't find the container with id d905e57bee15e73c7e99fac02d6f8088c31aa6826dd1e01aad690ada830ea93a Oct 02 11:15:32 crc kubenswrapper[4783]: I1002 11:15:32.320211 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b867ffde-a8a3-493e-b07e-f9c8320417ba","Type":"ContainerStarted","Data":"25e3634a2d23ed6c0e9b74fba5128b629f194bea54ffe3f4d1078234e4aff9eb"} Oct 02 11:15:32 crc kubenswrapper[4783]: I1002 11:15:32.324007 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-6dd865cb48-8hqrj" event={"ID":"83c170e0-d600-413c-b4a9-dbe838f2bcd2","Type":"ContainerStarted","Data":"d905e57bee15e73c7e99fac02d6f8088c31aa6826dd1e01aad690ada830ea93a"} Oct 02 11:15:32 crc kubenswrapper[4783]: I1002 11:15:32.981134 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-5fcdf587dd-wvthh" podUID="8abc2e1e-de94-4880-8a75-0c7ee0a2cdba" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.143:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.143:8443: connect: connection refused" Oct 02 11:15:32 crc kubenswrapper[4783]: I1002 11:15:32.981391 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-5fcdf587dd-wvthh" Oct 02 11:15:32 crc kubenswrapper[4783]: I1002 11:15:32.982117 4783 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="horizon" containerStatusID={"Type":"cri-o","ID":"17dc5053f7eb1f95b75a44af44ad2579d939fa2adfc83af523f7bb53acc883e3"} pod="openstack/horizon-5fcdf587dd-wvthh" containerMessage="Container horizon failed startup probe, will be restarted" Oct 02 11:15:32 crc kubenswrapper[4783]: I1002 11:15:32.982152 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-5fcdf587dd-wvthh" podUID="8abc2e1e-de94-4880-8a75-0c7ee0a2cdba" containerName="horizon" containerID="cri-o://17dc5053f7eb1f95b75a44af44ad2579d939fa2adfc83af523f7bb53acc883e3" gracePeriod=30 Oct 02 11:15:33 crc kubenswrapper[4783]: I1002 11:15:33.333698 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-6dd865cb48-8hqrj" event={"ID":"83c170e0-d600-413c-b4a9-dbe838f2bcd2","Type":"ContainerStarted","Data":"fb7e6424164e4b0cff3dc6a026d5729b9cc2d6cfb473583b89a254700c63be9b"} Oct 02 11:15:33 crc kubenswrapper[4783]: I1002 11:15:33.334075 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-6dd865cb48-8hqrj" Oct 02 11:15:33 crc kubenswrapper[4783]: I1002 11:15:33.334096 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-6dd865cb48-8hqrj" Oct 02 11:15:33 crc kubenswrapper[4783]: I1002 11:15:33.334108 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-6dd865cb48-8hqrj" 
event={"ID":"83c170e0-d600-413c-b4a9-dbe838f2bcd2","Type":"ContainerStarted","Data":"45981ac1a1a22d041008cf721ae8222687c940fbd4dd67a0ee93c6e6d9f5c2bb"} Oct 02 11:15:33 crc kubenswrapper[4783]: I1002 11:15:33.363772 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-6dd865cb48-8hqrj" podStartSLOduration=2.363754707 podStartE2EDuration="2.363754707s" podCreationTimestamp="2025-10-02 11:15:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:15:33.352604822 +0000 UTC m=+1366.668799083" watchObservedRunningTime="2025-10-02 11:15:33.363754707 +0000 UTC m=+1366.679948968" Oct 02 11:15:34 crc kubenswrapper[4783]: I1002 11:15:34.356273 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b867ffde-a8a3-493e-b07e-f9c8320417ba","Type":"ContainerStarted","Data":"e82e9e7db4e91bfaeb0b5cbfafdbe9d0a450f7f11bb50d0b31a39e9ad4cad3da"} Oct 02 11:15:34 crc kubenswrapper[4783]: I1002 11:15:34.356747 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Oct 02 11:15:34 crc kubenswrapper[4783]: I1002 11:15:34.389977 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.2582474980000002 podStartE2EDuration="7.389958785s" podCreationTimestamp="2025-10-02 11:15:27 +0000 UTC" firstStartedPulling="2025-10-02 11:15:28.509225537 +0000 UTC m=+1361.825419798" lastFinishedPulling="2025-10-02 11:15:33.640936814 +0000 UTC m=+1366.957131085" observedRunningTime="2025-10-02 11:15:34.388397212 +0000 UTC m=+1367.704591473" watchObservedRunningTime="2025-10-02 11:15:34.389958785 +0000 UTC m=+1367.706153046" Oct 02 11:15:37 crc kubenswrapper[4783]: I1002 11:15:37.387244 4783 generic.go:334] "Generic (PLEG): container finished" podID="68c31bf9-b59a-43ed-bb74-9e6cc0bce703" containerID="3bc304ac88d2823182da9a993a1fdbce5020a427e3173b94a6214924e530d810" exitCode=0 Oct 02 11:15:37 crc kubenswrapper[4783]: I1002 11:15:37.387304 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-g9szh" event={"ID":"68c31bf9-b59a-43ed-bb74-9e6cc0bce703","Type":"ContainerDied","Data":"3bc304ac88d2823182da9a993a1fdbce5020a427e3173b94a6214924e530d810"} Oct 02 11:15:38 crc kubenswrapper[4783]: I1002 11:15:38.734268 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-g9szh" Oct 02 11:15:38 crc kubenswrapper[4783]: I1002 11:15:38.824258 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qz4gg\" (UniqueName: \"kubernetes.io/projected/68c31bf9-b59a-43ed-bb74-9e6cc0bce703-kube-api-access-qz4gg\") pod \"68c31bf9-b59a-43ed-bb74-9e6cc0bce703\" (UID: \"68c31bf9-b59a-43ed-bb74-9e6cc0bce703\") " Oct 02 11:15:38 crc kubenswrapper[4783]: I1002 11:15:38.824696 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/68c31bf9-b59a-43ed-bb74-9e6cc0bce703-db-sync-config-data\") pod \"68c31bf9-b59a-43ed-bb74-9e6cc0bce703\" (UID: \"68c31bf9-b59a-43ed-bb74-9e6cc0bce703\") " Oct 02 11:15:38 crc kubenswrapper[4783]: I1002 11:15:38.824813 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/68c31bf9-b59a-43ed-bb74-9e6cc0bce703-combined-ca-bundle\") pod \"68c31bf9-b59a-43ed-bb74-9e6cc0bce703\" (UID: \"68c31bf9-b59a-43ed-bb74-9e6cc0bce703\") " Oct 02 11:15:38 crc kubenswrapper[4783]: I1002 11:15:38.835485 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/68c31bf9-b59a-43ed-bb74-9e6cc0bce703-kube-api-access-qz4gg" (OuterVolumeSpecName: "kube-api-access-qz4gg") pod "68c31bf9-b59a-43ed-bb74-9e6cc0bce703" (UID: "68c31bf9-b59a-43ed-bb74-9e6cc0bce703"). InnerVolumeSpecName "kube-api-access-qz4gg". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:15:38 crc kubenswrapper[4783]: I1002 11:15:38.837116 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/68c31bf9-b59a-43ed-bb74-9e6cc0bce703-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "68c31bf9-b59a-43ed-bb74-9e6cc0bce703" (UID: "68c31bf9-b59a-43ed-bb74-9e6cc0bce703"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:15:38 crc kubenswrapper[4783]: I1002 11:15:38.850680 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/68c31bf9-b59a-43ed-bb74-9e6cc0bce703-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "68c31bf9-b59a-43ed-bb74-9e6cc0bce703" (UID: "68c31bf9-b59a-43ed-bb74-9e6cc0bce703"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:15:38 crc kubenswrapper[4783]: I1002 11:15:38.926405 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qz4gg\" (UniqueName: \"kubernetes.io/projected/68c31bf9-b59a-43ed-bb74-9e6cc0bce703-kube-api-access-qz4gg\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:38 crc kubenswrapper[4783]: I1002 11:15:38.926443 4783 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/68c31bf9-b59a-43ed-bb74-9e6cc0bce703-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:38 crc kubenswrapper[4783]: I1002 11:15:38.926453 4783 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/68c31bf9-b59a-43ed-bb74-9e6cc0bce703-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.408920 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-g9szh" event={"ID":"68c31bf9-b59a-43ed-bb74-9e6cc0bce703","Type":"ContainerDied","Data":"666d6ee25b993c942fe5560cfaf5049cc4abdb77d5ba838471629d093c775eca"} Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.408956 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="666d6ee25b993c942fe5560cfaf5049cc4abdb77d5ba838471629d093c775eca" Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.408998 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-g9szh" Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.726070 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-7485d5d8b5-7jbpr"] Oct 02 11:15:39 crc kubenswrapper[4783]: E1002 11:15:39.726443 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="68c31bf9-b59a-43ed-bb74-9e6cc0bce703" containerName="barbican-db-sync" Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.726458 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="68c31bf9-b59a-43ed-bb74-9e6cc0bce703" containerName="barbican-db-sync" Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.726643 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="68c31bf9-b59a-43ed-bb74-9e6cc0bce703" containerName="barbican-db-sync" Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.727595 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-7485d5d8b5-7jbpr" Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.738008 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.739547 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-kzrxl" Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.739763 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.759895 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-6864f89bf8-2zjrd"] Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.761280 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-keystone-listener-6864f89bf8-2zjrd" Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.765197 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.774886 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-7485d5d8b5-7jbpr"] Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.826075 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-6864f89bf8-2zjrd"] Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.840982 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/47c9e784-ab99-406c-9dd6-c0b10742349e-config-data-custom\") pod \"barbican-worker-7485d5d8b5-7jbpr\" (UID: \"47c9e784-ab99-406c-9dd6-c0b10742349e\") " pod="openstack/barbican-worker-7485d5d8b5-7jbpr" Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.841021 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cnjwv\" (UniqueName: \"kubernetes.io/projected/47c9e784-ab99-406c-9dd6-c0b10742349e-kube-api-access-cnjwv\") pod \"barbican-worker-7485d5d8b5-7jbpr\" (UID: \"47c9e784-ab99-406c-9dd6-c0b10742349e\") " pod="openstack/barbican-worker-7485d5d8b5-7jbpr" Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.841185 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5f3639d-51fc-440a-819f-2cbcc93adda0-config-data\") pod \"barbican-keystone-listener-6864f89bf8-2zjrd\" (UID: \"a5f3639d-51fc-440a-819f-2cbcc93adda0\") " pod="openstack/barbican-keystone-listener-6864f89bf8-2zjrd" Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.841277 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47c9e784-ab99-406c-9dd6-c0b10742349e-combined-ca-bundle\") pod \"barbican-worker-7485d5d8b5-7jbpr\" (UID: \"47c9e784-ab99-406c-9dd6-c0b10742349e\") " pod="openstack/barbican-worker-7485d5d8b5-7jbpr" Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.841390 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5f3639d-51fc-440a-819f-2cbcc93adda0-combined-ca-bundle\") pod \"barbican-keystone-listener-6864f89bf8-2zjrd\" (UID: \"a5f3639d-51fc-440a-819f-2cbcc93adda0\") " pod="openstack/barbican-keystone-listener-6864f89bf8-2zjrd" Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.841546 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jpkfb\" (UniqueName: \"kubernetes.io/projected/a5f3639d-51fc-440a-819f-2cbcc93adda0-kube-api-access-jpkfb\") pod \"barbican-keystone-listener-6864f89bf8-2zjrd\" (UID: \"a5f3639d-51fc-440a-819f-2cbcc93adda0\") " pod="openstack/barbican-keystone-listener-6864f89bf8-2zjrd" Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.841607 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a5f3639d-51fc-440a-819f-2cbcc93adda0-config-data-custom\") pod \"barbican-keystone-listener-6864f89bf8-2zjrd\" (UID: 
\"a5f3639d-51fc-440a-819f-2cbcc93adda0\") " pod="openstack/barbican-keystone-listener-6864f89bf8-2zjrd" Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.841731 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/47c9e784-ab99-406c-9dd6-c0b10742349e-logs\") pod \"barbican-worker-7485d5d8b5-7jbpr\" (UID: \"47c9e784-ab99-406c-9dd6-c0b10742349e\") " pod="openstack/barbican-worker-7485d5d8b5-7jbpr" Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.841774 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a5f3639d-51fc-440a-819f-2cbcc93adda0-logs\") pod \"barbican-keystone-listener-6864f89bf8-2zjrd\" (UID: \"a5f3639d-51fc-440a-819f-2cbcc93adda0\") " pod="openstack/barbican-keystone-listener-6864f89bf8-2zjrd" Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.841895 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47c9e784-ab99-406c-9dd6-c0b10742349e-config-data\") pod \"barbican-worker-7485d5d8b5-7jbpr\" (UID: \"47c9e784-ab99-406c-9dd6-c0b10742349e\") " pod="openstack/barbican-worker-7485d5d8b5-7jbpr" Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.870841 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-59d5ff467f-2c72r"] Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.872238 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-59d5ff467f-2c72r" Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.900102 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-59d5ff467f-2c72r"] Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.945764 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/149e251f-f012-4120-ac20-bbd76bf0fcd3-dns-swift-storage-0\") pod \"dnsmasq-dns-59d5ff467f-2c72r\" (UID: \"149e251f-f012-4120-ac20-bbd76bf0fcd3\") " pod="openstack/dnsmasq-dns-59d5ff467f-2c72r" Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.945813 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/47c9e784-ab99-406c-9dd6-c0b10742349e-config-data-custom\") pod \"barbican-worker-7485d5d8b5-7jbpr\" (UID: \"47c9e784-ab99-406c-9dd6-c0b10742349e\") " pod="openstack/barbican-worker-7485d5d8b5-7jbpr" Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.945838 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cnjwv\" (UniqueName: \"kubernetes.io/projected/47c9e784-ab99-406c-9dd6-c0b10742349e-kube-api-access-cnjwv\") pod \"barbican-worker-7485d5d8b5-7jbpr\" (UID: \"47c9e784-ab99-406c-9dd6-c0b10742349e\") " pod="openstack/barbican-worker-7485d5d8b5-7jbpr" Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.945868 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5f3639d-51fc-440a-819f-2cbcc93adda0-config-data\") pod \"barbican-keystone-listener-6864f89bf8-2zjrd\" (UID: \"a5f3639d-51fc-440a-819f-2cbcc93adda0\") " pod="openstack/barbican-keystone-listener-6864f89bf8-2zjrd" Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.945886 4783 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47c9e784-ab99-406c-9dd6-c0b10742349e-combined-ca-bundle\") pod \"barbican-worker-7485d5d8b5-7jbpr\" (UID: \"47c9e784-ab99-406c-9dd6-c0b10742349e\") " pod="openstack/barbican-worker-7485d5d8b5-7jbpr" Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.945914 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5f3639d-51fc-440a-819f-2cbcc93adda0-combined-ca-bundle\") pod \"barbican-keystone-listener-6864f89bf8-2zjrd\" (UID: \"a5f3639d-51fc-440a-819f-2cbcc93adda0\") " pod="openstack/barbican-keystone-listener-6864f89bf8-2zjrd" Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.945936 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/149e251f-f012-4120-ac20-bbd76bf0fcd3-ovsdbserver-nb\") pod \"dnsmasq-dns-59d5ff467f-2c72r\" (UID: \"149e251f-f012-4120-ac20-bbd76bf0fcd3\") " pod="openstack/dnsmasq-dns-59d5ff467f-2c72r" Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.945969 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jpkfb\" (UniqueName: \"kubernetes.io/projected/a5f3639d-51fc-440a-819f-2cbcc93adda0-kube-api-access-jpkfb\") pod \"barbican-keystone-listener-6864f89bf8-2zjrd\" (UID: \"a5f3639d-51fc-440a-819f-2cbcc93adda0\") " pod="openstack/barbican-keystone-listener-6864f89bf8-2zjrd" Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.945987 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a5f3639d-51fc-440a-819f-2cbcc93adda0-config-data-custom\") pod \"barbican-keystone-listener-6864f89bf8-2zjrd\" (UID: \"a5f3639d-51fc-440a-819f-2cbcc93adda0\") " pod="openstack/barbican-keystone-listener-6864f89bf8-2zjrd" Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.946021 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/149e251f-f012-4120-ac20-bbd76bf0fcd3-dns-svc\") pod \"dnsmasq-dns-59d5ff467f-2c72r\" (UID: \"149e251f-f012-4120-ac20-bbd76bf0fcd3\") " pod="openstack/dnsmasq-dns-59d5ff467f-2c72r" Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.946074 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/47c9e784-ab99-406c-9dd6-c0b10742349e-logs\") pod \"barbican-worker-7485d5d8b5-7jbpr\" (UID: \"47c9e784-ab99-406c-9dd6-c0b10742349e\") " pod="openstack/barbican-worker-7485d5d8b5-7jbpr" Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.946103 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a5f3639d-51fc-440a-819f-2cbcc93adda0-logs\") pod \"barbican-keystone-listener-6864f89bf8-2zjrd\" (UID: \"a5f3639d-51fc-440a-819f-2cbcc93adda0\") " pod="openstack/barbican-keystone-listener-6864f89bf8-2zjrd" Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.946140 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/149e251f-f012-4120-ac20-bbd76bf0fcd3-ovsdbserver-sb\") pod \"dnsmasq-dns-59d5ff467f-2c72r\" (UID: \"149e251f-f012-4120-ac20-bbd76bf0fcd3\") 
" pod="openstack/dnsmasq-dns-59d5ff467f-2c72r" Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.946166 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2lnrb\" (UniqueName: \"kubernetes.io/projected/149e251f-f012-4120-ac20-bbd76bf0fcd3-kube-api-access-2lnrb\") pod \"dnsmasq-dns-59d5ff467f-2c72r\" (UID: \"149e251f-f012-4120-ac20-bbd76bf0fcd3\") " pod="openstack/dnsmasq-dns-59d5ff467f-2c72r" Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.946215 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47c9e784-ab99-406c-9dd6-c0b10742349e-config-data\") pod \"barbican-worker-7485d5d8b5-7jbpr\" (UID: \"47c9e784-ab99-406c-9dd6-c0b10742349e\") " pod="openstack/barbican-worker-7485d5d8b5-7jbpr" Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.946258 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/149e251f-f012-4120-ac20-bbd76bf0fcd3-config\") pod \"dnsmasq-dns-59d5ff467f-2c72r\" (UID: \"149e251f-f012-4120-ac20-bbd76bf0fcd3\") " pod="openstack/dnsmasq-dns-59d5ff467f-2c72r" Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.947831 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a5f3639d-51fc-440a-819f-2cbcc93adda0-logs\") pod \"barbican-keystone-listener-6864f89bf8-2zjrd\" (UID: \"a5f3639d-51fc-440a-819f-2cbcc93adda0\") " pod="openstack/barbican-keystone-listener-6864f89bf8-2zjrd" Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.948100 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/47c9e784-ab99-406c-9dd6-c0b10742349e-logs\") pod \"barbican-worker-7485d5d8b5-7jbpr\" (UID: \"47c9e784-ab99-406c-9dd6-c0b10742349e\") " pod="openstack/barbican-worker-7485d5d8b5-7jbpr" Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.954914 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/a5f3639d-51fc-440a-819f-2cbcc93adda0-config-data-custom\") pod \"barbican-keystone-listener-6864f89bf8-2zjrd\" (UID: \"a5f3639d-51fc-440a-819f-2cbcc93adda0\") " pod="openstack/barbican-keystone-listener-6864f89bf8-2zjrd" Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.957897 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/47c9e784-ab99-406c-9dd6-c0b10742349e-config-data-custom\") pod \"barbican-worker-7485d5d8b5-7jbpr\" (UID: \"47c9e784-ab99-406c-9dd6-c0b10742349e\") " pod="openstack/barbican-worker-7485d5d8b5-7jbpr" Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.958271 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47c9e784-ab99-406c-9dd6-c0b10742349e-combined-ca-bundle\") pod \"barbican-worker-7485d5d8b5-7jbpr\" (UID: \"47c9e784-ab99-406c-9dd6-c0b10742349e\") " pod="openstack/barbican-worker-7485d5d8b5-7jbpr" Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.962949 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a5f3639d-51fc-440a-819f-2cbcc93adda0-config-data\") pod \"barbican-keystone-listener-6864f89bf8-2zjrd\" (UID: \"a5f3639d-51fc-440a-819f-2cbcc93adda0\") " 
pod="openstack/barbican-keystone-listener-6864f89bf8-2zjrd" Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.964010 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47c9e784-ab99-406c-9dd6-c0b10742349e-config-data\") pod \"barbican-worker-7485d5d8b5-7jbpr\" (UID: \"47c9e784-ab99-406c-9dd6-c0b10742349e\") " pod="openstack/barbican-worker-7485d5d8b5-7jbpr" Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.964224 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a5f3639d-51fc-440a-819f-2cbcc93adda0-combined-ca-bundle\") pod \"barbican-keystone-listener-6864f89bf8-2zjrd\" (UID: \"a5f3639d-51fc-440a-819f-2cbcc93adda0\") " pod="openstack/barbican-keystone-listener-6864f89bf8-2zjrd" Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.972049 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jpkfb\" (UniqueName: \"kubernetes.io/projected/a5f3639d-51fc-440a-819f-2cbcc93adda0-kube-api-access-jpkfb\") pod \"barbican-keystone-listener-6864f89bf8-2zjrd\" (UID: \"a5f3639d-51fc-440a-819f-2cbcc93adda0\") " pod="openstack/barbican-keystone-listener-6864f89bf8-2zjrd" Oct 02 11:15:39 crc kubenswrapper[4783]: I1002 11:15:39.976132 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cnjwv\" (UniqueName: \"kubernetes.io/projected/47c9e784-ab99-406c-9dd6-c0b10742349e-kube-api-access-cnjwv\") pod \"barbican-worker-7485d5d8b5-7jbpr\" (UID: \"47c9e784-ab99-406c-9dd6-c0b10742349e\") " pod="openstack/barbican-worker-7485d5d8b5-7jbpr" Oct 02 11:15:40 crc kubenswrapper[4783]: I1002 11:15:40.033504 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-77768f4b8b-q72bm"] Oct 02 11:15:40 crc kubenswrapper[4783]: I1002 11:15:40.035153 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-77768f4b8b-q72bm" Oct 02 11:15:40 crc kubenswrapper[4783]: I1002 11:15:40.037510 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data" Oct 02 11:15:40 crc kubenswrapper[4783]: I1002 11:15:40.047184 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/149e251f-f012-4120-ac20-bbd76bf0fcd3-config\") pod \"dnsmasq-dns-59d5ff467f-2c72r\" (UID: \"149e251f-f012-4120-ac20-bbd76bf0fcd3\") " pod="openstack/dnsmasq-dns-59d5ff467f-2c72r" Oct 02 11:15:40 crc kubenswrapper[4783]: I1002 11:15:40.047233 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/149e251f-f012-4120-ac20-bbd76bf0fcd3-dns-swift-storage-0\") pod \"dnsmasq-dns-59d5ff467f-2c72r\" (UID: \"149e251f-f012-4120-ac20-bbd76bf0fcd3\") " pod="openstack/dnsmasq-dns-59d5ff467f-2c72r" Oct 02 11:15:40 crc kubenswrapper[4783]: I1002 11:15:40.047310 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/149e251f-f012-4120-ac20-bbd76bf0fcd3-ovsdbserver-nb\") pod \"dnsmasq-dns-59d5ff467f-2c72r\" (UID: \"149e251f-f012-4120-ac20-bbd76bf0fcd3\") " pod="openstack/dnsmasq-dns-59d5ff467f-2c72r" Oct 02 11:15:40 crc kubenswrapper[4783]: I1002 11:15:40.047350 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/149e251f-f012-4120-ac20-bbd76bf0fcd3-dns-svc\") pod \"dnsmasq-dns-59d5ff467f-2c72r\" (UID: \"149e251f-f012-4120-ac20-bbd76bf0fcd3\") " pod="openstack/dnsmasq-dns-59d5ff467f-2c72r" Oct 02 11:15:40 crc kubenswrapper[4783]: I1002 11:15:40.047437 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/149e251f-f012-4120-ac20-bbd76bf0fcd3-ovsdbserver-sb\") pod \"dnsmasq-dns-59d5ff467f-2c72r\" (UID: \"149e251f-f012-4120-ac20-bbd76bf0fcd3\") " pod="openstack/dnsmasq-dns-59d5ff467f-2c72r" Oct 02 11:15:40 crc kubenswrapper[4783]: I1002 11:15:40.047465 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2lnrb\" (UniqueName: \"kubernetes.io/projected/149e251f-f012-4120-ac20-bbd76bf0fcd3-kube-api-access-2lnrb\") pod \"dnsmasq-dns-59d5ff467f-2c72r\" (UID: \"149e251f-f012-4120-ac20-bbd76bf0fcd3\") " pod="openstack/dnsmasq-dns-59d5ff467f-2c72r" Oct 02 11:15:40 crc kubenswrapper[4783]: I1002 11:15:40.048218 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/149e251f-f012-4120-ac20-bbd76bf0fcd3-config\") pod \"dnsmasq-dns-59d5ff467f-2c72r\" (UID: \"149e251f-f012-4120-ac20-bbd76bf0fcd3\") " pod="openstack/dnsmasq-dns-59d5ff467f-2c72r" Oct 02 11:15:40 crc kubenswrapper[4783]: I1002 11:15:40.048403 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/149e251f-f012-4120-ac20-bbd76bf0fcd3-dns-swift-storage-0\") pod \"dnsmasq-dns-59d5ff467f-2c72r\" (UID: \"149e251f-f012-4120-ac20-bbd76bf0fcd3\") " pod="openstack/dnsmasq-dns-59d5ff467f-2c72r" Oct 02 11:15:40 crc kubenswrapper[4783]: I1002 11:15:40.048908 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/149e251f-f012-4120-ac20-bbd76bf0fcd3-dns-svc\") pod \"dnsmasq-dns-59d5ff467f-2c72r\" (UID: \"149e251f-f012-4120-ac20-bbd76bf0fcd3\") " pod="openstack/dnsmasq-dns-59d5ff467f-2c72r" Oct 02 11:15:40 crc kubenswrapper[4783]: I1002 11:15:40.049732 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-7485d5d8b5-7jbpr" Oct 02 11:15:40 crc kubenswrapper[4783]: I1002 11:15:40.052225 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/149e251f-f012-4120-ac20-bbd76bf0fcd3-ovsdbserver-nb\") pod \"dnsmasq-dns-59d5ff467f-2c72r\" (UID: \"149e251f-f012-4120-ac20-bbd76bf0fcd3\") " pod="openstack/dnsmasq-dns-59d5ff467f-2c72r" Oct 02 11:15:40 crc kubenswrapper[4783]: I1002 11:15:40.053055 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/149e251f-f012-4120-ac20-bbd76bf0fcd3-ovsdbserver-sb\") pod \"dnsmasq-dns-59d5ff467f-2c72r\" (UID: \"149e251f-f012-4120-ac20-bbd76bf0fcd3\") " pod="openstack/dnsmasq-dns-59d5ff467f-2c72r" Oct 02 11:15:40 crc kubenswrapper[4783]: I1002 11:15:40.066859 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-77768f4b8b-q72bm"] Oct 02 11:15:40 crc kubenswrapper[4783]: I1002 11:15:40.079361 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-6864f89bf8-2zjrd" Oct 02 11:15:40 crc kubenswrapper[4783]: I1002 11:15:40.101525 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2lnrb\" (UniqueName: \"kubernetes.io/projected/149e251f-f012-4120-ac20-bbd76bf0fcd3-kube-api-access-2lnrb\") pod \"dnsmasq-dns-59d5ff467f-2c72r\" (UID: \"149e251f-f012-4120-ac20-bbd76bf0fcd3\") " pod="openstack/dnsmasq-dns-59d5ff467f-2c72r" Oct 02 11:15:40 crc kubenswrapper[4783]: I1002 11:15:40.148625 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6f6561e9-9bc7-4cde-adda-c142e6f6eb7a-logs\") pod \"barbican-api-77768f4b8b-q72bm\" (UID: \"6f6561e9-9bc7-4cde-adda-c142e6f6eb7a\") " pod="openstack/barbican-api-77768f4b8b-q72bm" Oct 02 11:15:40 crc kubenswrapper[4783]: I1002 11:15:40.151999 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6f6561e9-9bc7-4cde-adda-c142e6f6eb7a-config-data-custom\") pod \"barbican-api-77768f4b8b-q72bm\" (UID: \"6f6561e9-9bc7-4cde-adda-c142e6f6eb7a\") " pod="openstack/barbican-api-77768f4b8b-q72bm" Oct 02 11:15:40 crc kubenswrapper[4783]: I1002 11:15:40.152083 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f6561e9-9bc7-4cde-adda-c142e6f6eb7a-config-data\") pod \"barbican-api-77768f4b8b-q72bm\" (UID: \"6f6561e9-9bc7-4cde-adda-c142e6f6eb7a\") " pod="openstack/barbican-api-77768f4b8b-q72bm" Oct 02 11:15:40 crc kubenswrapper[4783]: I1002 11:15:40.152104 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zf5pj\" (UniqueName: \"kubernetes.io/projected/6f6561e9-9bc7-4cde-adda-c142e6f6eb7a-kube-api-access-zf5pj\") pod \"barbican-api-77768f4b8b-q72bm\" (UID: \"6f6561e9-9bc7-4cde-adda-c142e6f6eb7a\") " pod="openstack/barbican-api-77768f4b8b-q72bm" Oct 02 
11:15:40 crc kubenswrapper[4783]: I1002 11:15:40.152133 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f6561e9-9bc7-4cde-adda-c142e6f6eb7a-combined-ca-bundle\") pod \"barbican-api-77768f4b8b-q72bm\" (UID: \"6f6561e9-9bc7-4cde-adda-c142e6f6eb7a\") " pod="openstack/barbican-api-77768f4b8b-q72bm" Oct 02 11:15:40 crc kubenswrapper[4783]: I1002 11:15:40.198588 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-59d5ff467f-2c72r" Oct 02 11:15:40 crc kubenswrapper[4783]: I1002 11:15:40.253955 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6f6561e9-9bc7-4cde-adda-c142e6f6eb7a-logs\") pod \"barbican-api-77768f4b8b-q72bm\" (UID: \"6f6561e9-9bc7-4cde-adda-c142e6f6eb7a\") " pod="openstack/barbican-api-77768f4b8b-q72bm" Oct 02 11:15:40 crc kubenswrapper[4783]: I1002 11:15:40.254113 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6f6561e9-9bc7-4cde-adda-c142e6f6eb7a-config-data-custom\") pod \"barbican-api-77768f4b8b-q72bm\" (UID: \"6f6561e9-9bc7-4cde-adda-c142e6f6eb7a\") " pod="openstack/barbican-api-77768f4b8b-q72bm" Oct 02 11:15:40 crc kubenswrapper[4783]: I1002 11:15:40.254188 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f6561e9-9bc7-4cde-adda-c142e6f6eb7a-config-data\") pod \"barbican-api-77768f4b8b-q72bm\" (UID: \"6f6561e9-9bc7-4cde-adda-c142e6f6eb7a\") " pod="openstack/barbican-api-77768f4b8b-q72bm" Oct 02 11:15:40 crc kubenswrapper[4783]: I1002 11:15:40.254214 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zf5pj\" (UniqueName: \"kubernetes.io/projected/6f6561e9-9bc7-4cde-adda-c142e6f6eb7a-kube-api-access-zf5pj\") pod \"barbican-api-77768f4b8b-q72bm\" (UID: \"6f6561e9-9bc7-4cde-adda-c142e6f6eb7a\") " pod="openstack/barbican-api-77768f4b8b-q72bm" Oct 02 11:15:40 crc kubenswrapper[4783]: I1002 11:15:40.254243 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f6561e9-9bc7-4cde-adda-c142e6f6eb7a-combined-ca-bundle\") pod \"barbican-api-77768f4b8b-q72bm\" (UID: \"6f6561e9-9bc7-4cde-adda-c142e6f6eb7a\") " pod="openstack/barbican-api-77768f4b8b-q72bm" Oct 02 11:15:40 crc kubenswrapper[4783]: I1002 11:15:40.255475 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6f6561e9-9bc7-4cde-adda-c142e6f6eb7a-logs\") pod \"barbican-api-77768f4b8b-q72bm\" (UID: \"6f6561e9-9bc7-4cde-adda-c142e6f6eb7a\") " pod="openstack/barbican-api-77768f4b8b-q72bm" Oct 02 11:15:40 crc kubenswrapper[4783]: I1002 11:15:40.260706 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f6561e9-9bc7-4cde-adda-c142e6f6eb7a-combined-ca-bundle\") pod \"barbican-api-77768f4b8b-q72bm\" (UID: \"6f6561e9-9bc7-4cde-adda-c142e6f6eb7a\") " pod="openstack/barbican-api-77768f4b8b-q72bm" Oct 02 11:15:40 crc kubenswrapper[4783]: I1002 11:15:40.261657 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6f6561e9-9bc7-4cde-adda-c142e6f6eb7a-config-data-custom\") pod 
\"barbican-api-77768f4b8b-q72bm\" (UID: \"6f6561e9-9bc7-4cde-adda-c142e6f6eb7a\") " pod="openstack/barbican-api-77768f4b8b-q72bm" Oct 02 11:15:40 crc kubenswrapper[4783]: I1002 11:15:40.262518 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f6561e9-9bc7-4cde-adda-c142e6f6eb7a-config-data\") pod \"barbican-api-77768f4b8b-q72bm\" (UID: \"6f6561e9-9bc7-4cde-adda-c142e6f6eb7a\") " pod="openstack/barbican-api-77768f4b8b-q72bm" Oct 02 11:15:40 crc kubenswrapper[4783]: I1002 11:15:40.271054 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zf5pj\" (UniqueName: \"kubernetes.io/projected/6f6561e9-9bc7-4cde-adda-c142e6f6eb7a-kube-api-access-zf5pj\") pod \"barbican-api-77768f4b8b-q72bm\" (UID: \"6f6561e9-9bc7-4cde-adda-c142e6f6eb7a\") " pod="openstack/barbican-api-77768f4b8b-q72bm" Oct 02 11:15:40 crc kubenswrapper[4783]: I1002 11:15:40.376045 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-77768f4b8b-q72bm" Oct 02 11:15:40 crc kubenswrapper[4783]: I1002 11:15:40.623776 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-7485d5d8b5-7jbpr"] Oct 02 11:15:40 crc kubenswrapper[4783]: I1002 11:15:40.669991 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-6864f89bf8-2zjrd"] Oct 02 11:15:40 crc kubenswrapper[4783]: I1002 11:15:40.800327 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-59d5ff467f-2c72r"] Oct 02 11:15:40 crc kubenswrapper[4783]: W1002 11:15:40.818921 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod149e251f_f012_4120_ac20_bbd76bf0fcd3.slice/crio-3cbbc3bc4598e0085effb3513ed5ba3df96286607dc628b8674e2c535c79745d WatchSource:0}: Error finding container 3cbbc3bc4598e0085effb3513ed5ba3df96286607dc628b8674e2c535c79745d: Status 404 returned error can't find the container with id 3cbbc3bc4598e0085effb3513ed5ba3df96286607dc628b8674e2c535c79745d Oct 02 11:15:41 crc kubenswrapper[4783]: W1002 11:15:41.052127 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6f6561e9_9bc7_4cde_adda_c142e6f6eb7a.slice/crio-9a4faa254df25a00ca295c5e54d6fbab32b5f7ffb325f9338b45d83d8ebf2004 WatchSource:0}: Error finding container 9a4faa254df25a00ca295c5e54d6fbab32b5f7ffb325f9338b45d83d8ebf2004: Status 404 returned error can't find the container with id 9a4faa254df25a00ca295c5e54d6fbab32b5f7ffb325f9338b45d83d8ebf2004 Oct 02 11:15:41 crc kubenswrapper[4783]: I1002 11:15:41.063011 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-77768f4b8b-q72bm"] Oct 02 11:15:41 crc kubenswrapper[4783]: I1002 11:15:41.443526 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-77768f4b8b-q72bm" event={"ID":"6f6561e9-9bc7-4cde-adda-c142e6f6eb7a","Type":"ContainerStarted","Data":"42dbf0d885c2f45bc7c336d620f72044895639ebf18513403d8d9842f3988e6b"} Oct 02 11:15:41 crc kubenswrapper[4783]: I1002 11:15:41.443575 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-77768f4b8b-q72bm" event={"ID":"6f6561e9-9bc7-4cde-adda-c142e6f6eb7a","Type":"ContainerStarted","Data":"9a4faa254df25a00ca295c5e54d6fbab32b5f7ffb325f9338b45d83d8ebf2004"} Oct 02 11:15:41 crc kubenswrapper[4783]: I1002 11:15:41.445006 4783 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7485d5d8b5-7jbpr" event={"ID":"47c9e784-ab99-406c-9dd6-c0b10742349e","Type":"ContainerStarted","Data":"3a479bf2fa169f16407876d2e5a916a9a28b262310eb6ac795ee6c190de709d1"} Oct 02 11:15:41 crc kubenswrapper[4783]: I1002 11:15:41.446752 4783 generic.go:334] "Generic (PLEG): container finished" podID="149e251f-f012-4120-ac20-bbd76bf0fcd3" containerID="4dd8b26313e4702d7147a6819041044facdeabeb2d5be3a9c158386fbac04db4" exitCode=0 Oct 02 11:15:41 crc kubenswrapper[4783]: I1002 11:15:41.447821 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59d5ff467f-2c72r" event={"ID":"149e251f-f012-4120-ac20-bbd76bf0fcd3","Type":"ContainerDied","Data":"4dd8b26313e4702d7147a6819041044facdeabeb2d5be3a9c158386fbac04db4"} Oct 02 11:15:41 crc kubenswrapper[4783]: I1002 11:15:41.447849 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59d5ff467f-2c72r" event={"ID":"149e251f-f012-4120-ac20-bbd76bf0fcd3","Type":"ContainerStarted","Data":"3cbbc3bc4598e0085effb3513ed5ba3df96286607dc628b8674e2c535c79745d"} Oct 02 11:15:41 crc kubenswrapper[4783]: I1002 11:15:41.451186 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6864f89bf8-2zjrd" event={"ID":"a5f3639d-51fc-440a-819f-2cbcc93adda0","Type":"ContainerStarted","Data":"1eaf2d05421c0dca7c77dcef2998a3f1d2aec14d1933af8ecfdb375b12c1259d"} Oct 02 11:15:42 crc kubenswrapper[4783]: I1002 11:15:42.463278 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-77768f4b8b-q72bm" event={"ID":"6f6561e9-9bc7-4cde-adda-c142e6f6eb7a","Type":"ContainerStarted","Data":"8589ca3751f97966b896bc74baed6713e4a70d9e4ad200e250db9c7cf485015a"} Oct 02 11:15:42 crc kubenswrapper[4783]: I1002 11:15:42.463775 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-77768f4b8b-q72bm" Oct 02 11:15:42 crc kubenswrapper[4783]: I1002 11:15:42.465279 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59d5ff467f-2c72r" event={"ID":"149e251f-f012-4120-ac20-bbd76bf0fcd3","Type":"ContainerStarted","Data":"2e02aba1069f0d41138fe2d1b33978eebda85e3776bf4211569fb60b061ac0d2"} Oct 02 11:15:42 crc kubenswrapper[4783]: I1002 11:15:42.465997 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-59d5ff467f-2c72r" Oct 02 11:15:42 crc kubenswrapper[4783]: I1002 11:15:42.503483 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-77768f4b8b-q72bm" podStartSLOduration=3.503463392 podStartE2EDuration="3.503463392s" podCreationTimestamp="2025-10-02 11:15:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:15:42.498047894 +0000 UTC m=+1375.814242155" watchObservedRunningTime="2025-10-02 11:15:42.503463392 +0000 UTC m=+1375.819657653" Oct 02 11:15:42 crc kubenswrapper[4783]: I1002 11:15:42.551972 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-59d5ff467f-2c72r" podStartSLOduration=3.551953139 podStartE2EDuration="3.551953139s" podCreationTimestamp="2025-10-02 11:15:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:15:42.540857346 +0000 UTC m=+1375.857051607" watchObservedRunningTime="2025-10-02 
11:15:42.551953139 +0000 UTC m=+1375.868147400" Oct 02 11:15:43 crc kubenswrapper[4783]: I1002 11:15:43.024600 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-5bb7dfc68d-7dl59"] Oct 02 11:15:43 crc kubenswrapper[4783]: I1002 11:15:43.025957 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-5bb7dfc68d-7dl59" Oct 02 11:15:43 crc kubenswrapper[4783]: W1002 11:15:43.027655 4783 reflector.go:561] object-"openstack"/"cert-barbican-internal-svc": failed to list *v1.Secret: secrets "cert-barbican-internal-svc" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openstack": no relationship found between node 'crc' and this object Oct 02 11:15:43 crc kubenswrapper[4783]: E1002 11:15:43.027697 4783 reflector.go:158] "Unhandled Error" err="object-\"openstack\"/\"cert-barbican-internal-svc\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"cert-barbican-internal-svc\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openstack\": no relationship found between node 'crc' and this object" logger="UnhandledError" Oct 02 11:15:43 crc kubenswrapper[4783]: W1002 11:15:43.027748 4783 reflector.go:561] object-"openstack"/"cert-barbican-public-svc": failed to list *v1.Secret: secrets "cert-barbican-public-svc" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openstack": no relationship found between node 'crc' and this object Oct 02 11:15:43 crc kubenswrapper[4783]: E1002 11:15:43.027757 4783 reflector.go:158] "Unhandled Error" err="object-\"openstack\"/\"cert-barbican-public-svc\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"cert-barbican-public-svc\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openstack\": no relationship found between node 'crc' and this object" logger="UnhandledError" Oct 02 11:15:43 crc kubenswrapper[4783]: I1002 11:15:43.053137 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-5bb7dfc68d-7dl59"] Oct 02 11:15:43 crc kubenswrapper[4783]: I1002 11:15:43.125738 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9f9ca941-0e76-41d9-b0c0-7a4dada2640b-config-data-custom\") pod \"barbican-api-5bb7dfc68d-7dl59\" (UID: \"9f9ca941-0e76-41d9-b0c0-7a4dada2640b\") " pod="openstack/barbican-api-5bb7dfc68d-7dl59" Oct 02 11:15:43 crc kubenswrapper[4783]: I1002 11:15:43.125799 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9f9ca941-0e76-41d9-b0c0-7a4dada2640b-internal-tls-certs\") pod \"barbican-api-5bb7dfc68d-7dl59\" (UID: \"9f9ca941-0e76-41d9-b0c0-7a4dada2640b\") " pod="openstack/barbican-api-5bb7dfc68d-7dl59" Oct 02 11:15:43 crc kubenswrapper[4783]: I1002 11:15:43.125872 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9f9ca941-0e76-41d9-b0c0-7a4dada2640b-public-tls-certs\") pod \"barbican-api-5bb7dfc68d-7dl59\" (UID: \"9f9ca941-0e76-41d9-b0c0-7a4dada2640b\") " pod="openstack/barbican-api-5bb7dfc68d-7dl59" Oct 02 11:15:43 crc kubenswrapper[4783]: I1002 11:15:43.125994 4783 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9f9ca941-0e76-41d9-b0c0-7a4dada2640b-config-data\") pod \"barbican-api-5bb7dfc68d-7dl59\" (UID: \"9f9ca941-0e76-41d9-b0c0-7a4dada2640b\") " pod="openstack/barbican-api-5bb7dfc68d-7dl59" Oct 02 11:15:43 crc kubenswrapper[4783]: I1002 11:15:43.126049 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f9ca941-0e76-41d9-b0c0-7a4dada2640b-combined-ca-bundle\") pod \"barbican-api-5bb7dfc68d-7dl59\" (UID: \"9f9ca941-0e76-41d9-b0c0-7a4dada2640b\") " pod="openstack/barbican-api-5bb7dfc68d-7dl59" Oct 02 11:15:43 crc kubenswrapper[4783]: I1002 11:15:43.126089 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9f9ca941-0e76-41d9-b0c0-7a4dada2640b-logs\") pod \"barbican-api-5bb7dfc68d-7dl59\" (UID: \"9f9ca941-0e76-41d9-b0c0-7a4dada2640b\") " pod="openstack/barbican-api-5bb7dfc68d-7dl59" Oct 02 11:15:43 crc kubenswrapper[4783]: I1002 11:15:43.126119 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xg8zb\" (UniqueName: \"kubernetes.io/projected/9f9ca941-0e76-41d9-b0c0-7a4dada2640b-kube-api-access-xg8zb\") pod \"barbican-api-5bb7dfc68d-7dl59\" (UID: \"9f9ca941-0e76-41d9-b0c0-7a4dada2640b\") " pod="openstack/barbican-api-5bb7dfc68d-7dl59" Oct 02 11:15:43 crc kubenswrapper[4783]: I1002 11:15:43.227746 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9f9ca941-0e76-41d9-b0c0-7a4dada2640b-internal-tls-certs\") pod \"barbican-api-5bb7dfc68d-7dl59\" (UID: \"9f9ca941-0e76-41d9-b0c0-7a4dada2640b\") " pod="openstack/barbican-api-5bb7dfc68d-7dl59" Oct 02 11:15:43 crc kubenswrapper[4783]: I1002 11:15:43.228026 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9f9ca941-0e76-41d9-b0c0-7a4dada2640b-public-tls-certs\") pod \"barbican-api-5bb7dfc68d-7dl59\" (UID: \"9f9ca941-0e76-41d9-b0c0-7a4dada2640b\") " pod="openstack/barbican-api-5bb7dfc68d-7dl59" Oct 02 11:15:43 crc kubenswrapper[4783]: I1002 11:15:43.228099 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9f9ca941-0e76-41d9-b0c0-7a4dada2640b-config-data\") pod \"barbican-api-5bb7dfc68d-7dl59\" (UID: \"9f9ca941-0e76-41d9-b0c0-7a4dada2640b\") " pod="openstack/barbican-api-5bb7dfc68d-7dl59" Oct 02 11:15:43 crc kubenswrapper[4783]: I1002 11:15:43.228135 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f9ca941-0e76-41d9-b0c0-7a4dada2640b-combined-ca-bundle\") pod \"barbican-api-5bb7dfc68d-7dl59\" (UID: \"9f9ca941-0e76-41d9-b0c0-7a4dada2640b\") " pod="openstack/barbican-api-5bb7dfc68d-7dl59" Oct 02 11:15:43 crc kubenswrapper[4783]: I1002 11:15:43.228155 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9f9ca941-0e76-41d9-b0c0-7a4dada2640b-logs\") pod \"barbican-api-5bb7dfc68d-7dl59\" (UID: \"9f9ca941-0e76-41d9-b0c0-7a4dada2640b\") " pod="openstack/barbican-api-5bb7dfc68d-7dl59" Oct 02 11:15:43 crc kubenswrapper[4783]: I1002 11:15:43.228175 4783 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xg8zb\" (UniqueName: \"kubernetes.io/projected/9f9ca941-0e76-41d9-b0c0-7a4dada2640b-kube-api-access-xg8zb\") pod \"barbican-api-5bb7dfc68d-7dl59\" (UID: \"9f9ca941-0e76-41d9-b0c0-7a4dada2640b\") " pod="openstack/barbican-api-5bb7dfc68d-7dl59" Oct 02 11:15:43 crc kubenswrapper[4783]: I1002 11:15:43.228213 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9f9ca941-0e76-41d9-b0c0-7a4dada2640b-config-data-custom\") pod \"barbican-api-5bb7dfc68d-7dl59\" (UID: \"9f9ca941-0e76-41d9-b0c0-7a4dada2640b\") " pod="openstack/barbican-api-5bb7dfc68d-7dl59" Oct 02 11:15:43 crc kubenswrapper[4783]: I1002 11:15:43.229624 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/9f9ca941-0e76-41d9-b0c0-7a4dada2640b-logs\") pod \"barbican-api-5bb7dfc68d-7dl59\" (UID: \"9f9ca941-0e76-41d9-b0c0-7a4dada2640b\") " pod="openstack/barbican-api-5bb7dfc68d-7dl59" Oct 02 11:15:43 crc kubenswrapper[4783]: I1002 11:15:43.238821 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9f9ca941-0e76-41d9-b0c0-7a4dada2640b-combined-ca-bundle\") pod \"barbican-api-5bb7dfc68d-7dl59\" (UID: \"9f9ca941-0e76-41d9-b0c0-7a4dada2640b\") " pod="openstack/barbican-api-5bb7dfc68d-7dl59" Oct 02 11:15:43 crc kubenswrapper[4783]: I1002 11:15:43.252473 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9f9ca941-0e76-41d9-b0c0-7a4dada2640b-config-data\") pod \"barbican-api-5bb7dfc68d-7dl59\" (UID: \"9f9ca941-0e76-41d9-b0c0-7a4dada2640b\") " pod="openstack/barbican-api-5bb7dfc68d-7dl59" Oct 02 11:15:43 crc kubenswrapper[4783]: I1002 11:15:43.252886 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/9f9ca941-0e76-41d9-b0c0-7a4dada2640b-config-data-custom\") pod \"barbican-api-5bb7dfc68d-7dl59\" (UID: \"9f9ca941-0e76-41d9-b0c0-7a4dada2640b\") " pod="openstack/barbican-api-5bb7dfc68d-7dl59" Oct 02 11:15:43 crc kubenswrapper[4783]: I1002 11:15:43.260378 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xg8zb\" (UniqueName: \"kubernetes.io/projected/9f9ca941-0e76-41d9-b0c0-7a4dada2640b-kube-api-access-xg8zb\") pod \"barbican-api-5bb7dfc68d-7dl59\" (UID: \"9f9ca941-0e76-41d9-b0c0-7a4dada2640b\") " pod="openstack/barbican-api-5bb7dfc68d-7dl59" Oct 02 11:15:43 crc kubenswrapper[4783]: I1002 11:15:43.560246 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-77768f4b8b-q72bm" Oct 02 11:15:44 crc kubenswrapper[4783]: I1002 11:15:44.068898 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Oct 02 11:15:44 crc kubenswrapper[4783]: I1002 11:15:44.075956 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/9f9ca941-0e76-41d9-b0c0-7a4dada2640b-internal-tls-certs\") pod \"barbican-api-5bb7dfc68d-7dl59\" (UID: \"9f9ca941-0e76-41d9-b0c0-7a4dada2640b\") " pod="openstack/barbican-api-5bb7dfc68d-7dl59" Oct 02 11:15:44 crc kubenswrapper[4783]: I1002 11:15:44.153123 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Oct 
02 11:15:44 crc kubenswrapper[4783]: I1002 11:15:44.162692 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/9f9ca941-0e76-41d9-b0c0-7a4dada2640b-public-tls-certs\") pod \"barbican-api-5bb7dfc68d-7dl59\" (UID: \"9f9ca941-0e76-41d9-b0c0-7a4dada2640b\") " pod="openstack/barbican-api-5bb7dfc68d-7dl59" Oct 02 11:15:44 crc kubenswrapper[4783]: I1002 11:15:44.244007 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-5bb7dfc68d-7dl59" Oct 02 11:15:44 crc kubenswrapper[4783]: I1002 11:15:44.568383 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6864f89bf8-2zjrd" event={"ID":"a5f3639d-51fc-440a-819f-2cbcc93adda0","Type":"ContainerStarted","Data":"fb1d55407a1c83df9c8b4f7bc5a8146fd4c1c0c4870b11e448aba1801b60c3e4"} Oct 02 11:15:44 crc kubenswrapper[4783]: I1002 11:15:44.570156 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7485d5d8b5-7jbpr" event={"ID":"47c9e784-ab99-406c-9dd6-c0b10742349e","Type":"ContainerStarted","Data":"a2c99b73c72323bf22b8b4858cd704ddd5408f99fb863dd505e70703fa15d857"} Oct 02 11:15:44 crc kubenswrapper[4783]: I1002 11:15:44.717956 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-5bb7dfc68d-7dl59"] Oct 02 11:15:45 crc kubenswrapper[4783]: I1002 11:15:45.579864 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5bb7dfc68d-7dl59" event={"ID":"9f9ca941-0e76-41d9-b0c0-7a4dada2640b","Type":"ContainerStarted","Data":"3e950cb337109b056045897f8c04ba60486d21f4077e365d96e4b55f2947ebf4"} Oct 02 11:15:45 crc kubenswrapper[4783]: I1002 11:15:45.580186 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5bb7dfc68d-7dl59" event={"ID":"9f9ca941-0e76-41d9-b0c0-7a4dada2640b","Type":"ContainerStarted","Data":"a5d4f34b3d41d79d010651a01e886205b091938e243040edf24961c10732c4a6"} Oct 02 11:15:45 crc kubenswrapper[4783]: I1002 11:15:45.580196 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5bb7dfc68d-7dl59" event={"ID":"9f9ca941-0e76-41d9-b0c0-7a4dada2640b","Type":"ContainerStarted","Data":"b13bea2262a8abebe6d956d06b650d2b74a38149736c74a51f0d79d3610d5e80"} Oct 02 11:15:45 crc kubenswrapper[4783]: I1002 11:15:45.581318 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5bb7dfc68d-7dl59" Oct 02 11:15:45 crc kubenswrapper[4783]: I1002 11:15:45.581341 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5bb7dfc68d-7dl59" Oct 02 11:15:45 crc kubenswrapper[4783]: I1002 11:15:45.587477 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7485d5d8b5-7jbpr" event={"ID":"47c9e784-ab99-406c-9dd6-c0b10742349e","Type":"ContainerStarted","Data":"d7d3e52cbeafd1dd37a339290465f1d840d7fcb225265c965054e44f8d0be615"} Oct 02 11:15:45 crc kubenswrapper[4783]: I1002 11:15:45.592730 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-6864f89bf8-2zjrd" event={"ID":"a5f3639d-51fc-440a-819f-2cbcc93adda0","Type":"ContainerStarted","Data":"f6739452ba8b353656e2f79f508355d2d683f0690cd7dee072eaa557994409db"} Oct 02 11:15:45 crc kubenswrapper[4783]: I1002 11:15:45.605699 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-5bb7dfc68d-7dl59" podStartSLOduration=2.60564224 
podStartE2EDuration="2.60564224s" podCreationTimestamp="2025-10-02 11:15:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:15:45.60455535 +0000 UTC m=+1378.920749611" watchObservedRunningTime="2025-10-02 11:15:45.60564224 +0000 UTC m=+1378.921836501" Oct 02 11:15:45 crc kubenswrapper[4783]: I1002 11:15:45.635027 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-7485d5d8b5-7jbpr" podStartSLOduration=3.856790904 podStartE2EDuration="6.635005844s" podCreationTimestamp="2025-10-02 11:15:39 +0000 UTC" firstStartedPulling="2025-10-02 11:15:40.624219537 +0000 UTC m=+1373.940413798" lastFinishedPulling="2025-10-02 11:15:43.402434487 +0000 UTC m=+1376.718628738" observedRunningTime="2025-10-02 11:15:45.627951741 +0000 UTC m=+1378.944146002" watchObservedRunningTime="2025-10-02 11:15:45.635005844 +0000 UTC m=+1378.951200105" Oct 02 11:15:45 crc kubenswrapper[4783]: I1002 11:15:45.653975 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-6864f89bf8-2zjrd" podStartSLOduration=3.963921226 podStartE2EDuration="6.653948932s" podCreationTimestamp="2025-10-02 11:15:39 +0000 UTC" firstStartedPulling="2025-10-02 11:15:40.710614232 +0000 UTC m=+1374.026808493" lastFinishedPulling="2025-10-02 11:15:43.400641938 +0000 UTC m=+1376.716836199" observedRunningTime="2025-10-02 11:15:45.650772765 +0000 UTC m=+1378.966967036" watchObservedRunningTime="2025-10-02 11:15:45.653948932 +0000 UTC m=+1378.970143203" Oct 02 11:15:48 crc kubenswrapper[4783]: I1002 11:15:48.628584 4783 generic.go:334] "Generic (PLEG): container finished" podID="9e4fb56d-2565-4383-a883-a0c1eae40cb4" containerID="6065cae19bc7b3d54b1ea3462555fa77433d6d20d53fc623121834b0b2926859" exitCode=0 Oct 02 11:15:48 crc kubenswrapper[4783]: I1002 11:15:48.628659 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-sqsnf" event={"ID":"9e4fb56d-2565-4383-a883-a0c1eae40cb4","Type":"ContainerDied","Data":"6065cae19bc7b3d54b1ea3462555fa77433d6d20d53fc623121834b0b2926859"} Oct 02 11:15:48 crc kubenswrapper[4783]: I1002 11:15:48.630449 4783 generic.go:334] "Generic (PLEG): container finished" podID="479a79a2-f65b-443b-865a-bec4c138b978" containerID="31ba9fc88407b90063a3b07e92a4f172cd98c0503845642ff22552a9d5ec1cdf" exitCode=0 Oct 02 11:15:48 crc kubenswrapper[4783]: I1002 11:15:48.630492 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-bh7g4" event={"ID":"479a79a2-f65b-443b-865a-bec4c138b978","Type":"ContainerDied","Data":"31ba9fc88407b90063a3b07e92a4f172cd98c0503845642ff22552a9d5ec1cdf"} Oct 02 11:15:50 crc kubenswrapper[4783]: I1002 11:15:50.143663 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-sqsnf" Oct 02 11:15:50 crc kubenswrapper[4783]: I1002 11:15:50.151241 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-bh7g4" Oct 02 11:15:50 crc kubenswrapper[4783]: I1002 11:15:50.175682 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9e4fb56d-2565-4383-a883-a0c1eae40cb4-etc-machine-id\") pod \"9e4fb56d-2565-4383-a883-a0c1eae40cb4\" (UID: \"9e4fb56d-2565-4383-a883-a0c1eae40cb4\") " Oct 02 11:15:50 crc kubenswrapper[4783]: I1002 11:15:50.175818 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/479a79a2-f65b-443b-865a-bec4c138b978-config\") pod \"479a79a2-f65b-443b-865a-bec4c138b978\" (UID: \"479a79a2-f65b-443b-865a-bec4c138b978\") " Oct 02 11:15:50 crc kubenswrapper[4783]: I1002 11:15:50.175889 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jscxl\" (UniqueName: \"kubernetes.io/projected/9e4fb56d-2565-4383-a883-a0c1eae40cb4-kube-api-access-jscxl\") pod \"9e4fb56d-2565-4383-a883-a0c1eae40cb4\" (UID: \"9e4fb56d-2565-4383-a883-a0c1eae40cb4\") " Oct 02 11:15:50 crc kubenswrapper[4783]: I1002 11:15:50.175968 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/9e4fb56d-2565-4383-a883-a0c1eae40cb4-db-sync-config-data\") pod \"9e4fb56d-2565-4383-a883-a0c1eae40cb4\" (UID: \"9e4fb56d-2565-4383-a883-a0c1eae40cb4\") " Oct 02 11:15:50 crc kubenswrapper[4783]: I1002 11:15:50.176018 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9e4fb56d-2565-4383-a883-a0c1eae40cb4-scripts\") pod \"9e4fb56d-2565-4383-a883-a0c1eae40cb4\" (UID: \"9e4fb56d-2565-4383-a883-a0c1eae40cb4\") " Oct 02 11:15:50 crc kubenswrapper[4783]: I1002 11:15:50.176049 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9e4fb56d-2565-4383-a883-a0c1eae40cb4-config-data\") pod \"9e4fb56d-2565-4383-a883-a0c1eae40cb4\" (UID: \"9e4fb56d-2565-4383-a883-a0c1eae40cb4\") " Oct 02 11:15:50 crc kubenswrapper[4783]: I1002 11:15:50.176070 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/479a79a2-f65b-443b-865a-bec4c138b978-combined-ca-bundle\") pod \"479a79a2-f65b-443b-865a-bec4c138b978\" (UID: \"479a79a2-f65b-443b-865a-bec4c138b978\") " Oct 02 11:15:50 crc kubenswrapper[4783]: I1002 11:15:50.176141 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e4fb56d-2565-4383-a883-a0c1eae40cb4-combined-ca-bundle\") pod \"9e4fb56d-2565-4383-a883-a0c1eae40cb4\" (UID: \"9e4fb56d-2565-4383-a883-a0c1eae40cb4\") " Oct 02 11:15:50 crc kubenswrapper[4783]: I1002 11:15:50.176174 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-plkf2\" (UniqueName: \"kubernetes.io/projected/479a79a2-f65b-443b-865a-bec4c138b978-kube-api-access-plkf2\") pod \"479a79a2-f65b-443b-865a-bec4c138b978\" (UID: \"479a79a2-f65b-443b-865a-bec4c138b978\") " Oct 02 11:15:50 crc kubenswrapper[4783]: I1002 11:15:50.180046 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9e4fb56d-2565-4383-a883-a0c1eae40cb4-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "9e4fb56d-2565-4383-a883-a0c1eae40cb4" 
(UID: "9e4fb56d-2565-4383-a883-a0c1eae40cb4"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 02 11:15:50 crc kubenswrapper[4783]: I1002 11:15:50.193156 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9e4fb56d-2565-4383-a883-a0c1eae40cb4-scripts" (OuterVolumeSpecName: "scripts") pod "9e4fb56d-2565-4383-a883-a0c1eae40cb4" (UID: "9e4fb56d-2565-4383-a883-a0c1eae40cb4"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:15:50 crc kubenswrapper[4783]: I1002 11:15:50.196594 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9e4fb56d-2565-4383-a883-a0c1eae40cb4-kube-api-access-jscxl" (OuterVolumeSpecName: "kube-api-access-jscxl") pod "9e4fb56d-2565-4383-a883-a0c1eae40cb4" (UID: "9e4fb56d-2565-4383-a883-a0c1eae40cb4"). InnerVolumeSpecName "kube-api-access-jscxl". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:15:50 crc kubenswrapper[4783]: I1002 11:15:50.207677 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/479a79a2-f65b-443b-865a-bec4c138b978-kube-api-access-plkf2" (OuterVolumeSpecName: "kube-api-access-plkf2") pod "479a79a2-f65b-443b-865a-bec4c138b978" (UID: "479a79a2-f65b-443b-865a-bec4c138b978"). InnerVolumeSpecName "kube-api-access-plkf2". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:15:50 crc kubenswrapper[4783]: I1002 11:15:50.207771 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9e4fb56d-2565-4383-a883-a0c1eae40cb4-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "9e4fb56d-2565-4383-a883-a0c1eae40cb4" (UID: "9e4fb56d-2565-4383-a883-a0c1eae40cb4"). InnerVolumeSpecName "db-sync-config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:15:50 crc kubenswrapper[4783]: I1002 11:15:50.211606 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-59d5ff467f-2c72r" Oct 02 11:15:50 crc kubenswrapper[4783]: I1002 11:15:50.281012 4783 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/9e4fb56d-2565-4383-a883-a0c1eae40cb4-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:50 crc kubenswrapper[4783]: I1002 11:15:50.281046 4783 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/9e4fb56d-2565-4383-a883-a0c1eae40cb4-scripts\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:50 crc kubenswrapper[4783]: I1002 11:15:50.281058 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-plkf2\" (UniqueName: \"kubernetes.io/projected/479a79a2-f65b-443b-865a-bec4c138b978-kube-api-access-plkf2\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:50 crc kubenswrapper[4783]: I1002 11:15:50.281070 4783 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/9e4fb56d-2565-4383-a883-a0c1eae40cb4-etc-machine-id\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:50 crc kubenswrapper[4783]: I1002 11:15:50.281079 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jscxl\" (UniqueName: \"kubernetes.io/projected/9e4fb56d-2565-4383-a883-a0c1eae40cb4-kube-api-access-jscxl\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:50 crc kubenswrapper[4783]: I1002 11:15:50.285742 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9e4fb56d-2565-4383-a883-a0c1eae40cb4-config-data" (OuterVolumeSpecName: "config-data") pod "9e4fb56d-2565-4383-a883-a0c1eae40cb4" (UID: "9e4fb56d-2565-4383-a883-a0c1eae40cb4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:15:50 crc kubenswrapper[4783]: I1002 11:15:50.294277 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/479a79a2-f65b-443b-865a-bec4c138b978-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "479a79a2-f65b-443b-865a-bec4c138b978" (UID: "479a79a2-f65b-443b-865a-bec4c138b978"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:15:50 crc kubenswrapper[4783]: I1002 11:15:50.294884 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9e4fb56d-2565-4383-a883-a0c1eae40cb4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "9e4fb56d-2565-4383-a883-a0c1eae40cb4" (UID: "9e4fb56d-2565-4383-a883-a0c1eae40cb4"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:15:50 crc kubenswrapper[4783]: I1002 11:15:50.320636 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8b5c85b87-clk5x"] Oct 02 11:15:50 crc kubenswrapper[4783]: I1002 11:15:50.322228 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-8b5c85b87-clk5x" podUID="d488b88d-66e6-40fc-a29e-ee119ebb5e40" containerName="dnsmasq-dns" containerID="cri-o://f197b1f9f36ef3599662bf9bb2a4565c7fda88d7193ca8963ecb1d6a49c72956" gracePeriod=10 Oct 02 11:15:50 crc kubenswrapper[4783]: I1002 11:15:50.331941 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/479a79a2-f65b-443b-865a-bec4c138b978-config" (OuterVolumeSpecName: "config") pod "479a79a2-f65b-443b-865a-bec4c138b978" (UID: "479a79a2-f65b-443b-865a-bec4c138b978"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:15:50 crc kubenswrapper[4783]: I1002 11:15:50.382753 4783 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/479a79a2-f65b-443b-865a-bec4c138b978-config\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:50 crc kubenswrapper[4783]: I1002 11:15:50.382780 4783 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/9e4fb56d-2565-4383-a883-a0c1eae40cb4-config-data\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:50 crc kubenswrapper[4783]: I1002 11:15:50.382789 4783 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/479a79a2-f65b-443b-865a-bec4c138b978-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:50 crc kubenswrapper[4783]: I1002 11:15:50.382802 4783 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/9e4fb56d-2565-4383-a883-a0c1eae40cb4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:50 crc kubenswrapper[4783]: I1002 11:15:50.655060 4783 generic.go:334] "Generic (PLEG): container finished" podID="d488b88d-66e6-40fc-a29e-ee119ebb5e40" containerID="f197b1f9f36ef3599662bf9bb2a4565c7fda88d7193ca8963ecb1d6a49c72956" exitCode=0 Oct 02 11:15:50 crc kubenswrapper[4783]: I1002 11:15:50.655206 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b5c85b87-clk5x" event={"ID":"d488b88d-66e6-40fc-a29e-ee119ebb5e40","Type":"ContainerDied","Data":"f197b1f9f36ef3599662bf9bb2a4565c7fda88d7193ca8963ecb1d6a49c72956"} Oct 02 11:15:50 crc kubenswrapper[4783]: I1002 11:15:50.667896 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-db-sync-bh7g4" Oct 02 11:15:50 crc kubenswrapper[4783]: I1002 11:15:50.670103 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-bh7g4" event={"ID":"479a79a2-f65b-443b-865a-bec4c138b978","Type":"ContainerDied","Data":"170b4f835207ff2aa61e9dcb981ed0b184ac7de2ea476faed698a38b3f43232a"} Oct 02 11:15:50 crc kubenswrapper[4783]: I1002 11:15:50.670139 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="170b4f835207ff2aa61e9dcb981ed0b184ac7de2ea476faed698a38b3f43232a" Oct 02 11:15:50 crc kubenswrapper[4783]: I1002 11:15:50.717735 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-sqsnf" event={"ID":"9e4fb56d-2565-4383-a883-a0c1eae40cb4","Type":"ContainerDied","Data":"40f54dcf67c75a81b071081b86e4f29463caca99f8439de48b5c967ed2c6db53"} Oct 02 11:15:50 crc kubenswrapper[4783]: I1002 11:15:50.717773 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="40f54dcf67c75a81b071081b86e4f29463caca99f8439de48b5c967ed2c6db53" Oct 02 11:15:50 crc kubenswrapper[4783]: I1002 11:15:50.717837 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-sqsnf" Oct 02 11:15:50 crc kubenswrapper[4783]: I1002 11:15:50.961060 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-75c8ddd69c-47nl5"] Oct 02 11:15:50 crc kubenswrapper[4783]: E1002 11:15:50.961452 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e4fb56d-2565-4383-a883-a0c1eae40cb4" containerName="cinder-db-sync" Oct 02 11:15:50 crc kubenswrapper[4783]: I1002 11:15:50.961468 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e4fb56d-2565-4383-a883-a0c1eae40cb4" containerName="cinder-db-sync" Oct 02 11:15:50 crc kubenswrapper[4783]: E1002 11:15:50.961488 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="479a79a2-f65b-443b-865a-bec4c138b978" containerName="neutron-db-sync" Oct 02 11:15:50 crc kubenswrapper[4783]: I1002 11:15:50.961495 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="479a79a2-f65b-443b-865a-bec4c138b978" containerName="neutron-db-sync" Oct 02 11:15:50 crc kubenswrapper[4783]: I1002 11:15:50.961669 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="9e4fb56d-2565-4383-a883-a0c1eae40cb4" containerName="cinder-db-sync" Oct 02 11:15:50 crc kubenswrapper[4783]: I1002 11:15:50.961701 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="479a79a2-f65b-443b-865a-bec4c138b978" containerName="neutron-db-sync" Oct 02 11:15:50 crc kubenswrapper[4783]: I1002 11:15:50.973244 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-75c8ddd69c-47nl5" Oct 02 11:15:50 crc kubenswrapper[4783]: I1002 11:15:50.987757 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-75c8ddd69c-47nl5"] Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.001486 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4fc8f318-4f05-4ada-be59-c258785a63bb-config\") pod \"dnsmasq-dns-75c8ddd69c-47nl5\" (UID: \"4fc8f318-4f05-4ada-be59-c258785a63bb\") " pod="openstack/dnsmasq-dns-75c8ddd69c-47nl5" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.001568 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4fc8f318-4f05-4ada-be59-c258785a63bb-dns-svc\") pod \"dnsmasq-dns-75c8ddd69c-47nl5\" (UID: \"4fc8f318-4f05-4ada-be59-c258785a63bb\") " pod="openstack/dnsmasq-dns-75c8ddd69c-47nl5" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.001613 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4fc8f318-4f05-4ada-be59-c258785a63bb-ovsdbserver-sb\") pod \"dnsmasq-dns-75c8ddd69c-47nl5\" (UID: \"4fc8f318-4f05-4ada-be59-c258785a63bb\") " pod="openstack/dnsmasq-dns-75c8ddd69c-47nl5" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.001656 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kq6pk\" (UniqueName: \"kubernetes.io/projected/4fc8f318-4f05-4ada-be59-c258785a63bb-kube-api-access-kq6pk\") pod \"dnsmasq-dns-75c8ddd69c-47nl5\" (UID: \"4fc8f318-4f05-4ada-be59-c258785a63bb\") " pod="openstack/dnsmasq-dns-75c8ddd69c-47nl5" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.001678 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4fc8f318-4f05-4ada-be59-c258785a63bb-dns-swift-storage-0\") pod \"dnsmasq-dns-75c8ddd69c-47nl5\" (UID: \"4fc8f318-4f05-4ada-be59-c258785a63bb\") " pod="openstack/dnsmasq-dns-75c8ddd69c-47nl5" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.001703 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4fc8f318-4f05-4ada-be59-c258785a63bb-ovsdbserver-nb\") pod \"dnsmasq-dns-75c8ddd69c-47nl5\" (UID: \"4fc8f318-4f05-4ada-be59-c258785a63bb\") " pod="openstack/dnsmasq-dns-75c8ddd69c-47nl5" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.041136 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.042501 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.097180 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.097352 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.097365 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.097555 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-bkgxf" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.166216 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4fc8f318-4f05-4ada-be59-c258785a63bb-ovsdbserver-sb\") pod \"dnsmasq-dns-75c8ddd69c-47nl5\" (UID: \"4fc8f318-4f05-4ada-be59-c258785a63bb\") " pod="openstack/dnsmasq-dns-75c8ddd69c-47nl5" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.166268 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2mxkr\" (UniqueName: \"kubernetes.io/projected/d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a-kube-api-access-2mxkr\") pod \"cinder-scheduler-0\" (UID: \"d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a\") " pod="openstack/cinder-scheduler-0" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.166296 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a\") " pod="openstack/cinder-scheduler-0" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.166340 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kq6pk\" (UniqueName: \"kubernetes.io/projected/4fc8f318-4f05-4ada-be59-c258785a63bb-kube-api-access-kq6pk\") pod \"dnsmasq-dns-75c8ddd69c-47nl5\" (UID: \"4fc8f318-4f05-4ada-be59-c258785a63bb\") " pod="openstack/dnsmasq-dns-75c8ddd69c-47nl5" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.166359 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4fc8f318-4f05-4ada-be59-c258785a63bb-dns-swift-storage-0\") pod \"dnsmasq-dns-75c8ddd69c-47nl5\" (UID: \"4fc8f318-4f05-4ada-be59-c258785a63bb\") " pod="openstack/dnsmasq-dns-75c8ddd69c-47nl5" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.166386 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4fc8f318-4f05-4ada-be59-c258785a63bb-ovsdbserver-nb\") pod \"dnsmasq-dns-75c8ddd69c-47nl5\" (UID: \"4fc8f318-4f05-4ada-be59-c258785a63bb\") " pod="openstack/dnsmasq-dns-75c8ddd69c-47nl5" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.170465 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a-scripts\") pod \"cinder-scheduler-0\" (UID: \"d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a\") " pod="openstack/cinder-scheduler-0" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.170643 4783 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4fc8f318-4f05-4ada-be59-c258785a63bb-config\") pod \"dnsmasq-dns-75c8ddd69c-47nl5\" (UID: \"4fc8f318-4f05-4ada-be59-c258785a63bb\") " pod="openstack/dnsmasq-dns-75c8ddd69c-47nl5" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.170708 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a-config-data\") pod \"cinder-scheduler-0\" (UID: \"d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a\") " pod="openstack/cinder-scheduler-0" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.170742 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4fc8f318-4f05-4ada-be59-c258785a63bb-dns-svc\") pod \"dnsmasq-dns-75c8ddd69c-47nl5\" (UID: \"4fc8f318-4f05-4ada-be59-c258785a63bb\") " pod="openstack/dnsmasq-dns-75c8ddd69c-47nl5" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.170772 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a\") " pod="openstack/cinder-scheduler-0" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.170846 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a\") " pod="openstack/cinder-scheduler-0" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.171664 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4fc8f318-4f05-4ada-be59-c258785a63bb-dns-swift-storage-0\") pod \"dnsmasq-dns-75c8ddd69c-47nl5\" (UID: \"4fc8f318-4f05-4ada-be59-c258785a63bb\") " pod="openstack/dnsmasq-dns-75c8ddd69c-47nl5" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.171729 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4fc8f318-4f05-4ada-be59-c258785a63bb-config\") pod \"dnsmasq-dns-75c8ddd69c-47nl5\" (UID: \"4fc8f318-4f05-4ada-be59-c258785a63bb\") " pod="openstack/dnsmasq-dns-75c8ddd69c-47nl5" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.172202 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4fc8f318-4f05-4ada-be59-c258785a63bb-ovsdbserver-nb\") pod \"dnsmasq-dns-75c8ddd69c-47nl5\" (UID: \"4fc8f318-4f05-4ada-be59-c258785a63bb\") " pod="openstack/dnsmasq-dns-75c8ddd69c-47nl5" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.172748 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4fc8f318-4f05-4ada-be59-c258785a63bb-ovsdbserver-sb\") pod \"dnsmasq-dns-75c8ddd69c-47nl5\" (UID: \"4fc8f318-4f05-4ada-be59-c258785a63bb\") " pod="openstack/dnsmasq-dns-75c8ddd69c-47nl5" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.176610 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: 
\"kubernetes.io/configmap/4fc8f318-4f05-4ada-be59-c258785a63bb-dns-svc\") pod \"dnsmasq-dns-75c8ddd69c-47nl5\" (UID: \"4fc8f318-4f05-4ada-be59-c258785a63bb\") " pod="openstack/dnsmasq-dns-75c8ddd69c-47nl5" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.240525 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.273175 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kq6pk\" (UniqueName: \"kubernetes.io/projected/4fc8f318-4f05-4ada-be59-c258785a63bb-kube-api-access-kq6pk\") pod \"dnsmasq-dns-75c8ddd69c-47nl5\" (UID: \"4fc8f318-4f05-4ada-be59-c258785a63bb\") " pod="openstack/dnsmasq-dns-75c8ddd69c-47nl5" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.283275 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a\") " pod="openstack/cinder-scheduler-0" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.283928 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2mxkr\" (UniqueName: \"kubernetes.io/projected/d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a-kube-api-access-2mxkr\") pod \"cinder-scheduler-0\" (UID: \"d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a\") " pod="openstack/cinder-scheduler-0" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.284456 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a\") " pod="openstack/cinder-scheduler-0" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.284832 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a-scripts\") pod \"cinder-scheduler-0\" (UID: \"d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a\") " pod="openstack/cinder-scheduler-0" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.286048 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a-config-data\") pod \"cinder-scheduler-0\" (UID: \"d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a\") " pod="openstack/cinder-scheduler-0" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.286662 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a\") " pod="openstack/cinder-scheduler-0" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.287876 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a-scripts\") pod \"cinder-scheduler-0\" (UID: \"d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a\") " pod="openstack/cinder-scheduler-0" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.288009 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: 
\"kubernetes.io/host-path/d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a\") " pod="openstack/cinder-scheduler-0" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.303368 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a-config-data\") pod \"cinder-scheduler-0\" (UID: \"d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a\") " pod="openstack/cinder-scheduler-0" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.311743 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a\") " pod="openstack/cinder-scheduler-0" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.314915 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a\") " pod="openstack/cinder-scheduler-0" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.317526 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-6f9ccfdf84-lzmhg"] Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.319074 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-6f9ccfdf84-lzmhg" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.320483 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2mxkr\" (UniqueName: \"kubernetes.io/projected/d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a-kube-api-access-2mxkr\") pod \"cinder-scheduler-0\" (UID: \"d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a\") " pod="openstack/cinder-scheduler-0" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.341283 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.341528 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.341721 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-7rp2v" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.341895 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.342520 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-6f9ccfdf84-lzmhg"] Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.385478 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-75c8ddd69c-47nl5" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.387539 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-75c8ddd69c-47nl5"] Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.416734 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8b5c85b87-clk5x" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.422487 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-4jjjq"] Oct 02 11:15:51 crc kubenswrapper[4783]: E1002 11:15:51.422908 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d488b88d-66e6-40fc-a29e-ee119ebb5e40" containerName="init" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.422918 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="d488b88d-66e6-40fc-a29e-ee119ebb5e40" containerName="init" Oct 02 11:15:51 crc kubenswrapper[4783]: E1002 11:15:51.422943 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d488b88d-66e6-40fc-a29e-ee119ebb5e40" containerName="dnsmasq-dns" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.422949 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="d488b88d-66e6-40fc-a29e-ee119ebb5e40" containerName="dnsmasq-dns" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.423119 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="d488b88d-66e6-40fc-a29e-ee119ebb5e40" containerName="dnsmasq-dns" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.424039 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5784cf869f-4jjjq" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.447670 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-4jjjq"] Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.496331 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.501511 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mjtl8\" (UniqueName: \"kubernetes.io/projected/1b74946e-4754-418a-a8cd-30512a841704-kube-api-access-mjtl8\") pod \"neutron-6f9ccfdf84-lzmhg\" (UID: \"1b74946e-4754-418a-a8cd-30512a841704\") " pod="openstack/neutron-6f9ccfdf84-lzmhg" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.501578 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/1b74946e-4754-418a-a8cd-30512a841704-httpd-config\") pod \"neutron-6f9ccfdf84-lzmhg\" (UID: \"1b74946e-4754-418a-a8cd-30512a841704\") " pod="openstack/neutron-6f9ccfdf84-lzmhg" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.501613 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b74946e-4754-418a-a8cd-30512a841704-ovndb-tls-certs\") pod \"neutron-6f9ccfdf84-lzmhg\" (UID: \"1b74946e-4754-418a-a8cd-30512a841704\") " pod="openstack/neutron-6f9ccfdf84-lzmhg" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.501640 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/1b74946e-4754-418a-a8cd-30512a841704-config\") pod \"neutron-6f9ccfdf84-lzmhg\" (UID: \"1b74946e-4754-418a-a8cd-30512a841704\") " pod="openstack/neutron-6f9ccfdf84-lzmhg" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.501714 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/1b74946e-4754-418a-a8cd-30512a841704-combined-ca-bundle\") pod \"neutron-6f9ccfdf84-lzmhg\" (UID: \"1b74946e-4754-418a-a8cd-30512a841704\") " pod="openstack/neutron-6f9ccfdf84-lzmhg" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.602783 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d488b88d-66e6-40fc-a29e-ee119ebb5e40-dns-swift-storage-0\") pod \"d488b88d-66e6-40fc-a29e-ee119ebb5e40\" (UID: \"d488b88d-66e6-40fc-a29e-ee119ebb5e40\") " Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.602857 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d488b88d-66e6-40fc-a29e-ee119ebb5e40-ovsdbserver-sb\") pod \"d488b88d-66e6-40fc-a29e-ee119ebb5e40\" (UID: \"d488b88d-66e6-40fc-a29e-ee119ebb5e40\") " Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.603002 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d488b88d-66e6-40fc-a29e-ee119ebb5e40-ovsdbserver-nb\") pod \"d488b88d-66e6-40fc-a29e-ee119ebb5e40\" (UID: \"d488b88d-66e6-40fc-a29e-ee119ebb5e40\") " Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.603053 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d488b88d-66e6-40fc-a29e-ee119ebb5e40-dns-svc\") pod \"d488b88d-66e6-40fc-a29e-ee119ebb5e40\" (UID: \"d488b88d-66e6-40fc-a29e-ee119ebb5e40\") " Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.603079 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d488b88d-66e6-40fc-a29e-ee119ebb5e40-config\") pod \"d488b88d-66e6-40fc-a29e-ee119ebb5e40\" (UID: \"d488b88d-66e6-40fc-a29e-ee119ebb5e40\") " Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.603104 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-spr72\" (UniqueName: \"kubernetes.io/projected/d488b88d-66e6-40fc-a29e-ee119ebb5e40-kube-api-access-spr72\") pod \"d488b88d-66e6-40fc-a29e-ee119ebb5e40\" (UID: \"d488b88d-66e6-40fc-a29e-ee119ebb5e40\") " Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.603335 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9340097f-c1f0-4fd6-81eb-155ebf4a319d-ovsdbserver-sb\") pod \"dnsmasq-dns-5784cf869f-4jjjq\" (UID: \"9340097f-c1f0-4fd6-81eb-155ebf4a319d\") " pod="openstack/dnsmasq-dns-5784cf869f-4jjjq" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.603373 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b74946e-4754-418a-a8cd-30512a841704-combined-ca-bundle\") pod \"neutron-6f9ccfdf84-lzmhg\" (UID: \"1b74946e-4754-418a-a8cd-30512a841704\") " pod="openstack/neutron-6f9ccfdf84-lzmhg" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.603397 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9340097f-c1f0-4fd6-81eb-155ebf4a319d-ovsdbserver-nb\") pod \"dnsmasq-dns-5784cf869f-4jjjq\" (UID: \"9340097f-c1f0-4fd6-81eb-155ebf4a319d\") " pod="openstack/dnsmasq-dns-5784cf869f-4jjjq" Oct 02 11:15:51 crc 
kubenswrapper[4783]: I1002 11:15:51.603468 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9340097f-c1f0-4fd6-81eb-155ebf4a319d-dns-swift-storage-0\") pod \"dnsmasq-dns-5784cf869f-4jjjq\" (UID: \"9340097f-c1f0-4fd6-81eb-155ebf4a319d\") " pod="openstack/dnsmasq-dns-5784cf869f-4jjjq" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.603511 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mjtl8\" (UniqueName: \"kubernetes.io/projected/1b74946e-4754-418a-a8cd-30512a841704-kube-api-access-mjtl8\") pod \"neutron-6f9ccfdf84-lzmhg\" (UID: \"1b74946e-4754-418a-a8cd-30512a841704\") " pod="openstack/neutron-6f9ccfdf84-lzmhg" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.603536 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9340097f-c1f0-4fd6-81eb-155ebf4a319d-config\") pod \"dnsmasq-dns-5784cf869f-4jjjq\" (UID: \"9340097f-c1f0-4fd6-81eb-155ebf4a319d\") " pod="openstack/dnsmasq-dns-5784cf869f-4jjjq" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.603552 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/1b74946e-4754-418a-a8cd-30512a841704-httpd-config\") pod \"neutron-6f9ccfdf84-lzmhg\" (UID: \"1b74946e-4754-418a-a8cd-30512a841704\") " pod="openstack/neutron-6f9ccfdf84-lzmhg" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.603573 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zbwjt\" (UniqueName: \"kubernetes.io/projected/9340097f-c1f0-4fd6-81eb-155ebf4a319d-kube-api-access-zbwjt\") pod \"dnsmasq-dns-5784cf869f-4jjjq\" (UID: \"9340097f-c1f0-4fd6-81eb-155ebf4a319d\") " pod="openstack/dnsmasq-dns-5784cf869f-4jjjq" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.603594 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b74946e-4754-418a-a8cd-30512a841704-ovndb-tls-certs\") pod \"neutron-6f9ccfdf84-lzmhg\" (UID: \"1b74946e-4754-418a-a8cd-30512a841704\") " pod="openstack/neutron-6f9ccfdf84-lzmhg" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.603611 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9340097f-c1f0-4fd6-81eb-155ebf4a319d-dns-svc\") pod \"dnsmasq-dns-5784cf869f-4jjjq\" (UID: \"9340097f-c1f0-4fd6-81eb-155ebf4a319d\") " pod="openstack/dnsmasq-dns-5784cf869f-4jjjq" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.603630 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/1b74946e-4754-418a-a8cd-30512a841704-config\") pod \"neutron-6f9ccfdf84-lzmhg\" (UID: \"1b74946e-4754-418a-a8cd-30512a841704\") " pod="openstack/neutron-6f9ccfdf84-lzmhg" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.605654 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.607118 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.621113 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/1b74946e-4754-418a-a8cd-30512a841704-httpd-config\") pod \"neutron-6f9ccfdf84-lzmhg\" (UID: \"1b74946e-4754-418a-a8cd-30512a841704\") " pod="openstack/neutron-6f9ccfdf84-lzmhg" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.621709 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.622153 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.626375 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/1b74946e-4754-418a-a8cd-30512a841704-config\") pod \"neutron-6f9ccfdf84-lzmhg\" (UID: \"1b74946e-4754-418a-a8cd-30512a841704\") " pod="openstack/neutron-6f9ccfdf84-lzmhg" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.655356 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d488b88d-66e6-40fc-a29e-ee119ebb5e40-kube-api-access-spr72" (OuterVolumeSpecName: "kube-api-access-spr72") pod "d488b88d-66e6-40fc-a29e-ee119ebb5e40" (UID: "d488b88d-66e6-40fc-a29e-ee119ebb5e40"). InnerVolumeSpecName "kube-api-access-spr72". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.655994 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b74946e-4754-418a-a8cd-30512a841704-ovndb-tls-certs\") pod \"neutron-6f9ccfdf84-lzmhg\" (UID: \"1b74946e-4754-418a-a8cd-30512a841704\") " pod="openstack/neutron-6f9ccfdf84-lzmhg" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.660319 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b74946e-4754-418a-a8cd-30512a841704-combined-ca-bundle\") pod \"neutron-6f9ccfdf84-lzmhg\" (UID: \"1b74946e-4754-418a-a8cd-30512a841704\") " pod="openstack/neutron-6f9ccfdf84-lzmhg" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.683074 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mjtl8\" (UniqueName: \"kubernetes.io/projected/1b74946e-4754-418a-a8cd-30512a841704-kube-api-access-mjtl8\") pod \"neutron-6f9ccfdf84-lzmhg\" (UID: \"1b74946e-4754-418a-a8cd-30512a841704\") " pod="openstack/neutron-6f9ccfdf84-lzmhg" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.705632 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9340097f-c1f0-4fd6-81eb-155ebf4a319d-ovsdbserver-nb\") pod \"dnsmasq-dns-5784cf869f-4jjjq\" (UID: \"9340097f-c1f0-4fd6-81eb-155ebf4a319d\") " pod="openstack/dnsmasq-dns-5784cf869f-4jjjq" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.705704 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/09e5ac64-2f67-4af3-bf5f-9628fead6591-etc-machine-id\") pod \"cinder-api-0\" (UID: \"09e5ac64-2f67-4af3-bf5f-9628fead6591\") " pod="openstack/cinder-api-0" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.705735 4783 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09e5ac64-2f67-4af3-bf5f-9628fead6591-config-data\") pod \"cinder-api-0\" (UID: \"09e5ac64-2f67-4af3-bf5f-9628fead6591\") " pod="openstack/cinder-api-0" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.705782 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9340097f-c1f0-4fd6-81eb-155ebf4a319d-dns-swift-storage-0\") pod \"dnsmasq-dns-5784cf869f-4jjjq\" (UID: \"9340097f-c1f0-4fd6-81eb-155ebf4a319d\") " pod="openstack/dnsmasq-dns-5784cf869f-4jjjq" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.705858 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t2bc6\" (UniqueName: \"kubernetes.io/projected/09e5ac64-2f67-4af3-bf5f-9628fead6591-kube-api-access-t2bc6\") pod \"cinder-api-0\" (UID: \"09e5ac64-2f67-4af3-bf5f-9628fead6591\") " pod="openstack/cinder-api-0" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.705893 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09e5ac64-2f67-4af3-bf5f-9628fead6591-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"09e5ac64-2f67-4af3-bf5f-9628fead6591\") " pod="openstack/cinder-api-0" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.705946 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9340097f-c1f0-4fd6-81eb-155ebf4a319d-config\") pod \"dnsmasq-dns-5784cf869f-4jjjq\" (UID: \"9340097f-c1f0-4fd6-81eb-155ebf4a319d\") " pod="openstack/dnsmasq-dns-5784cf869f-4jjjq" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.705973 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zbwjt\" (UniqueName: \"kubernetes.io/projected/9340097f-c1f0-4fd6-81eb-155ebf4a319d-kube-api-access-zbwjt\") pod \"dnsmasq-dns-5784cf869f-4jjjq\" (UID: \"9340097f-c1f0-4fd6-81eb-155ebf4a319d\") " pod="openstack/dnsmasq-dns-5784cf869f-4jjjq" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.706009 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9340097f-c1f0-4fd6-81eb-155ebf4a319d-dns-svc\") pod \"dnsmasq-dns-5784cf869f-4jjjq\" (UID: \"9340097f-c1f0-4fd6-81eb-155ebf4a319d\") " pod="openstack/dnsmasq-dns-5784cf869f-4jjjq" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.706081 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/09e5ac64-2f67-4af3-bf5f-9628fead6591-logs\") pod \"cinder-api-0\" (UID: \"09e5ac64-2f67-4af3-bf5f-9628fead6591\") " pod="openstack/cinder-api-0" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.706105 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/09e5ac64-2f67-4af3-bf5f-9628fead6591-config-data-custom\") pod \"cinder-api-0\" (UID: \"09e5ac64-2f67-4af3-bf5f-9628fead6591\") " pod="openstack/cinder-api-0" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.706130 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/9340097f-c1f0-4fd6-81eb-155ebf4a319d-ovsdbserver-sb\") pod \"dnsmasq-dns-5784cf869f-4jjjq\" (UID: \"9340097f-c1f0-4fd6-81eb-155ebf4a319d\") " pod="openstack/dnsmasq-dns-5784cf869f-4jjjq" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.706147 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/09e5ac64-2f67-4af3-bf5f-9628fead6591-scripts\") pod \"cinder-api-0\" (UID: \"09e5ac64-2f67-4af3-bf5f-9628fead6591\") " pod="openstack/cinder-api-0" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.706299 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-spr72\" (UniqueName: \"kubernetes.io/projected/d488b88d-66e6-40fc-a29e-ee119ebb5e40-kube-api-access-spr72\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.707746 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9340097f-c1f0-4fd6-81eb-155ebf4a319d-ovsdbserver-nb\") pod \"dnsmasq-dns-5784cf869f-4jjjq\" (UID: \"9340097f-c1f0-4fd6-81eb-155ebf4a319d\") " pod="openstack/dnsmasq-dns-5784cf869f-4jjjq" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.708253 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9340097f-c1f0-4fd6-81eb-155ebf4a319d-dns-swift-storage-0\") pod \"dnsmasq-dns-5784cf869f-4jjjq\" (UID: \"9340097f-c1f0-4fd6-81eb-155ebf4a319d\") " pod="openstack/dnsmasq-dns-5784cf869f-4jjjq" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.708751 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9340097f-c1f0-4fd6-81eb-155ebf4a319d-dns-svc\") pod \"dnsmasq-dns-5784cf869f-4jjjq\" (UID: \"9340097f-c1f0-4fd6-81eb-155ebf4a319d\") " pod="openstack/dnsmasq-dns-5784cf869f-4jjjq" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.709226 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9340097f-c1f0-4fd6-81eb-155ebf4a319d-config\") pod \"dnsmasq-dns-5784cf869f-4jjjq\" (UID: \"9340097f-c1f0-4fd6-81eb-155ebf4a319d\") " pod="openstack/dnsmasq-dns-5784cf869f-4jjjq" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.709998 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9340097f-c1f0-4fd6-81eb-155ebf4a319d-ovsdbserver-sb\") pod \"dnsmasq-dns-5784cf869f-4jjjq\" (UID: \"9340097f-c1f0-4fd6-81eb-155ebf4a319d\") " pod="openstack/dnsmasq-dns-5784cf869f-4jjjq" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.736570 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-6f9ccfdf84-lzmhg" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.765216 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d488b88d-66e6-40fc-a29e-ee119ebb5e40-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "d488b88d-66e6-40fc-a29e-ee119ebb5e40" (UID: "d488b88d-66e6-40fc-a29e-ee119ebb5e40"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.774852 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8b5c85b87-clk5x" event={"ID":"d488b88d-66e6-40fc-a29e-ee119ebb5e40","Type":"ContainerDied","Data":"70720cc87c413f7f274df859330451a2ad62e102ce5ad0ca61a41a7c7c2dcb70"} Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.774898 4783 scope.go:117] "RemoveContainer" containerID="f197b1f9f36ef3599662bf9bb2a4565c7fda88d7193ca8963ecb1d6a49c72956" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.775067 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8b5c85b87-clk5x" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.793545 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zbwjt\" (UniqueName: \"kubernetes.io/projected/9340097f-c1f0-4fd6-81eb-155ebf4a319d-kube-api-access-zbwjt\") pod \"dnsmasq-dns-5784cf869f-4jjjq\" (UID: \"9340097f-c1f0-4fd6-81eb-155ebf4a319d\") " pod="openstack/dnsmasq-dns-5784cf869f-4jjjq" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.811236 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d488b88d-66e6-40fc-a29e-ee119ebb5e40-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "d488b88d-66e6-40fc-a29e-ee119ebb5e40" (UID: "d488b88d-66e6-40fc-a29e-ee119ebb5e40"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.812242 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/09e5ac64-2f67-4af3-bf5f-9628fead6591-logs\") pod \"cinder-api-0\" (UID: \"09e5ac64-2f67-4af3-bf5f-9628fead6591\") " pod="openstack/cinder-api-0" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.812274 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/09e5ac64-2f67-4af3-bf5f-9628fead6591-config-data-custom\") pod \"cinder-api-0\" (UID: \"09e5ac64-2f67-4af3-bf5f-9628fead6591\") " pod="openstack/cinder-api-0" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.815094 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/09e5ac64-2f67-4af3-bf5f-9628fead6591-logs\") pod \"cinder-api-0\" (UID: \"09e5ac64-2f67-4af3-bf5f-9628fead6591\") " pod="openstack/cinder-api-0" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.815162 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/09e5ac64-2f67-4af3-bf5f-9628fead6591-scripts\") pod \"cinder-api-0\" (UID: \"09e5ac64-2f67-4af3-bf5f-9628fead6591\") " pod="openstack/cinder-api-0" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.815274 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/09e5ac64-2f67-4af3-bf5f-9628fead6591-etc-machine-id\") pod \"cinder-api-0\" (UID: \"09e5ac64-2f67-4af3-bf5f-9628fead6591\") " pod="openstack/cinder-api-0" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.815298 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/09e5ac64-2f67-4af3-bf5f-9628fead6591-config-data\") pod \"cinder-api-0\" (UID: \"09e5ac64-2f67-4af3-bf5f-9628fead6591\") " pod="openstack/cinder-api-0" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.815474 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t2bc6\" (UniqueName: \"kubernetes.io/projected/09e5ac64-2f67-4af3-bf5f-9628fead6591-kube-api-access-t2bc6\") pod \"cinder-api-0\" (UID: \"09e5ac64-2f67-4af3-bf5f-9628fead6591\") " pod="openstack/cinder-api-0" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.815511 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09e5ac64-2f67-4af3-bf5f-9628fead6591-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"09e5ac64-2f67-4af3-bf5f-9628fead6591\") " pod="openstack/cinder-api-0" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.815655 4783 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/d488b88d-66e6-40fc-a29e-ee119ebb5e40-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.815669 4783 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/d488b88d-66e6-40fc-a29e-ee119ebb5e40-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.817394 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/09e5ac64-2f67-4af3-bf5f-9628fead6591-etc-machine-id\") pod \"cinder-api-0\" (UID: \"09e5ac64-2f67-4af3-bf5f-9628fead6591\") " pod="openstack/cinder-api-0" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.826057 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09e5ac64-2f67-4af3-bf5f-9628fead6591-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"09e5ac64-2f67-4af3-bf5f-9628fead6591\") " pod="openstack/cinder-api-0" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.853038 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/09e5ac64-2f67-4af3-bf5f-9628fead6591-config-data-custom\") pod \"cinder-api-0\" (UID: \"09e5ac64-2f67-4af3-bf5f-9628fead6591\") " pod="openstack/cinder-api-0" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.854399 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/09e5ac64-2f67-4af3-bf5f-9628fead6591-scripts\") pod \"cinder-api-0\" (UID: \"09e5ac64-2f67-4af3-bf5f-9628fead6591\") " pod="openstack/cinder-api-0" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.873116 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t2bc6\" (UniqueName: \"kubernetes.io/projected/09e5ac64-2f67-4af3-bf5f-9628fead6591-kube-api-access-t2bc6\") pod \"cinder-api-0\" (UID: \"09e5ac64-2f67-4af3-bf5f-9628fead6591\") " pod="openstack/cinder-api-0" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.873543 4783 scope.go:117] "RemoveContainer" containerID="d4aecfc3bdc72e2b4e8b460a13d3dc06ba0535ba98016466dbff4c2df0da31ce" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.926996 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/configmap/d488b88d-66e6-40fc-a29e-ee119ebb5e40-config" (OuterVolumeSpecName: "config") pod "d488b88d-66e6-40fc-a29e-ee119ebb5e40" (UID: "d488b88d-66e6-40fc-a29e-ee119ebb5e40"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.929192 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09e5ac64-2f67-4af3-bf5f-9628fead6591-config-data\") pod \"cinder-api-0\" (UID: \"09e5ac64-2f67-4af3-bf5f-9628fead6591\") " pod="openstack/cinder-api-0" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.929607 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d488b88d-66e6-40fc-a29e-ee119ebb5e40-config\") pod \"d488b88d-66e6-40fc-a29e-ee119ebb5e40\" (UID: \"d488b88d-66e6-40fc-a29e-ee119ebb5e40\") " Oct 02 11:15:51 crc kubenswrapper[4783]: W1002 11:15:51.932340 4783 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/d488b88d-66e6-40fc-a29e-ee119ebb5e40/volumes/kubernetes.io~configmap/config Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.932358 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d488b88d-66e6-40fc-a29e-ee119ebb5e40-config" (OuterVolumeSpecName: "config") pod "d488b88d-66e6-40fc-a29e-ee119ebb5e40" (UID: "d488b88d-66e6-40fc-a29e-ee119ebb5e40"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.946769 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d488b88d-66e6-40fc-a29e-ee119ebb5e40-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "d488b88d-66e6-40fc-a29e-ee119ebb5e40" (UID: "d488b88d-66e6-40fc-a29e-ee119ebb5e40"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:15:51 crc kubenswrapper[4783]: I1002 11:15:51.947077 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d488b88d-66e6-40fc-a29e-ee119ebb5e40-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "d488b88d-66e6-40fc-a29e-ee119ebb5e40" (UID: "d488b88d-66e6-40fc-a29e-ee119ebb5e40"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:15:52 crc kubenswrapper[4783]: I1002 11:15:52.008881 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Oct 02 11:15:52 crc kubenswrapper[4783]: I1002 11:15:52.035633 4783 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/d488b88d-66e6-40fc-a29e-ee119ebb5e40-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:52 crc kubenswrapper[4783]: I1002 11:15:52.035664 4783 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d488b88d-66e6-40fc-a29e-ee119ebb5e40-config\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:52 crc kubenswrapper[4783]: I1002 11:15:52.035674 4783 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/d488b88d-66e6-40fc-a29e-ee119ebb5e40-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:52 crc kubenswrapper[4783]: I1002 11:15:52.062604 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5784cf869f-4jjjq" Oct 02 11:15:52 crc kubenswrapper[4783]: I1002 11:15:52.404044 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8b5c85b87-clk5x"] Oct 02 11:15:52 crc kubenswrapper[4783]: I1002 11:15:52.439674 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-8b5c85b87-clk5x"] Oct 02 11:15:52 crc kubenswrapper[4783]: I1002 11:15:52.459792 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Oct 02 11:15:52 crc kubenswrapper[4783]: I1002 11:15:52.478926 4783 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 02 11:15:52 crc kubenswrapper[4783]: I1002 11:15:52.601088 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-75c8ddd69c-47nl5"] Oct 02 11:15:52 crc kubenswrapper[4783]: I1002 11:15:52.891048 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a","Type":"ContainerStarted","Data":"9d09ded9de9dccbd3674ce785e5846361f2dd0649c7a423ca883da4f72059af1"} Oct 02 11:15:52 crc kubenswrapper[4783]: I1002 11:15:52.891961 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-4jjjq"] Oct 02 11:15:52 crc kubenswrapper[4783]: I1002 11:15:52.908606 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75c8ddd69c-47nl5" event={"ID":"4fc8f318-4f05-4ada-be59-c258785a63bb","Type":"ContainerStarted","Data":"db4c5a2fec82b44ac4c3de44a768351e75e0ce690dfd16900181730a4ea65252"} Oct 02 11:15:52 crc kubenswrapper[4783]: I1002 11:15:52.937930 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-5bb7dfc68d-7dl59" Oct 02 11:15:53 crc kubenswrapper[4783]: I1002 11:15:53.061694 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-6f9ccfdf84-lzmhg"] Oct 02 11:15:53 crc kubenswrapper[4783]: I1002 11:15:53.182919 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Oct 02 11:15:53 crc kubenswrapper[4783]: I1002 11:15:53.620978 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d488b88d-66e6-40fc-a29e-ee119ebb5e40" path="/var/lib/kubelet/pods/d488b88d-66e6-40fc-a29e-ee119ebb5e40/volumes" Oct 02 11:15:53 crc kubenswrapper[4783]: I1002 11:15:53.968074 4783 generic.go:334] "Generic (PLEG): container finished" podID="4fc8f318-4f05-4ada-be59-c258785a63bb" 
containerID="f32bade4f25b8974536e9df49cf73283ce470904c5c6166ae585beb911790d62" exitCode=0 Oct 02 11:15:53 crc kubenswrapper[4783]: I1002 11:15:53.968451 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75c8ddd69c-47nl5" event={"ID":"4fc8f318-4f05-4ada-be59-c258785a63bb","Type":"ContainerDied","Data":"f32bade4f25b8974536e9df49cf73283ce470904c5c6166ae585beb911790d62"} Oct 02 11:15:53 crc kubenswrapper[4783]: I1002 11:15:53.980302 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6f9ccfdf84-lzmhg" event={"ID":"1b74946e-4754-418a-a8cd-30512a841704","Type":"ContainerStarted","Data":"861615351fe69ffff68fa1bcc5227cc1fabe6d6fef29df981f56bfd566fb6f87"} Oct 02 11:15:53 crc kubenswrapper[4783]: I1002 11:15:53.981666 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6f9ccfdf84-lzmhg" event={"ID":"1b74946e-4754-418a-a8cd-30512a841704","Type":"ContainerStarted","Data":"fb557630c1aff6034fafe14349765ccb74ab2c00afb100fd5d638a6de582e9ef"} Oct 02 11:15:54 crc kubenswrapper[4783]: I1002 11:15:54.004887 4783 generic.go:334] "Generic (PLEG): container finished" podID="48c11fb6-76f0-4028-a76f-6f67904bf3aa" containerID="58153738b4583103c9b6b2a35fefcf7fa1e4c224a6092baa570c4337ad49f3cb" exitCode=137 Oct 02 11:15:54 crc kubenswrapper[4783]: I1002 11:15:54.004961 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-567b57d86d-gv6fq" event={"ID":"48c11fb6-76f0-4028-a76f-6f67904bf3aa","Type":"ContainerDied","Data":"58153738b4583103c9b6b2a35fefcf7fa1e4c224a6092baa570c4337ad49f3cb"} Oct 02 11:15:54 crc kubenswrapper[4783]: I1002 11:15:54.006666 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5784cf869f-4jjjq" event={"ID":"9340097f-c1f0-4fd6-81eb-155ebf4a319d","Type":"ContainerStarted","Data":"d540ae7e371dd17bff3e211ce3ef7df13ce0702176935191ff6097ff62dce1a5"} Oct 02 11:15:54 crc kubenswrapper[4783]: I1002 11:15:54.007396 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"09e5ac64-2f67-4af3-bf5f-9628fead6591","Type":"ContainerStarted","Data":"67194c0c81fa2bcdf9986a477d7941da3fb21a340622b7f224f42fcffac27d1b"} Oct 02 11:15:54 crc kubenswrapper[4783]: I1002 11:15:54.594457 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-75c8ddd69c-47nl5" Oct 02 11:15:54 crc kubenswrapper[4783]: I1002 11:15:54.669063 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4fc8f318-4f05-4ada-be59-c258785a63bb-config\") pod \"4fc8f318-4f05-4ada-be59-c258785a63bb\" (UID: \"4fc8f318-4f05-4ada-be59-c258785a63bb\") " Oct 02 11:15:54 crc kubenswrapper[4783]: I1002 11:15:54.669122 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4fc8f318-4f05-4ada-be59-c258785a63bb-ovsdbserver-nb\") pod \"4fc8f318-4f05-4ada-be59-c258785a63bb\" (UID: \"4fc8f318-4f05-4ada-be59-c258785a63bb\") " Oct 02 11:15:54 crc kubenswrapper[4783]: I1002 11:15:54.669203 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kq6pk\" (UniqueName: \"kubernetes.io/projected/4fc8f318-4f05-4ada-be59-c258785a63bb-kube-api-access-kq6pk\") pod \"4fc8f318-4f05-4ada-be59-c258785a63bb\" (UID: \"4fc8f318-4f05-4ada-be59-c258785a63bb\") " Oct 02 11:15:54 crc kubenswrapper[4783]: I1002 11:15:54.669274 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4fc8f318-4f05-4ada-be59-c258785a63bb-dns-svc\") pod \"4fc8f318-4f05-4ada-be59-c258785a63bb\" (UID: \"4fc8f318-4f05-4ada-be59-c258785a63bb\") " Oct 02 11:15:54 crc kubenswrapper[4783]: I1002 11:15:54.669305 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4fc8f318-4f05-4ada-be59-c258785a63bb-dns-swift-storage-0\") pod \"4fc8f318-4f05-4ada-be59-c258785a63bb\" (UID: \"4fc8f318-4f05-4ada-be59-c258785a63bb\") " Oct 02 11:15:54 crc kubenswrapper[4783]: I1002 11:15:54.669364 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4fc8f318-4f05-4ada-be59-c258785a63bb-ovsdbserver-sb\") pod \"4fc8f318-4f05-4ada-be59-c258785a63bb\" (UID: \"4fc8f318-4f05-4ada-be59-c258785a63bb\") " Oct 02 11:15:54 crc kubenswrapper[4783]: I1002 11:15:54.700818 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4fc8f318-4f05-4ada-be59-c258785a63bb-kube-api-access-kq6pk" (OuterVolumeSpecName: "kube-api-access-kq6pk") pod "4fc8f318-4f05-4ada-be59-c258785a63bb" (UID: "4fc8f318-4f05-4ada-be59-c258785a63bb"). InnerVolumeSpecName "kube-api-access-kq6pk". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:15:54 crc kubenswrapper[4783]: I1002 11:15:54.709596 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4fc8f318-4f05-4ada-be59-c258785a63bb-config" (OuterVolumeSpecName: "config") pod "4fc8f318-4f05-4ada-be59-c258785a63bb" (UID: "4fc8f318-4f05-4ada-be59-c258785a63bb"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:15:54 crc kubenswrapper[4783]: I1002 11:15:54.740606 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4fc8f318-4f05-4ada-be59-c258785a63bb-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "4fc8f318-4f05-4ada-be59-c258785a63bb" (UID: "4fc8f318-4f05-4ada-be59-c258785a63bb"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:15:54 crc kubenswrapper[4783]: I1002 11:15:54.744020 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4fc8f318-4f05-4ada-be59-c258785a63bb-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "4fc8f318-4f05-4ada-be59-c258785a63bb" (UID: "4fc8f318-4f05-4ada-be59-c258785a63bb"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:15:54 crc kubenswrapper[4783]: I1002 11:15:54.744016 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4fc8f318-4f05-4ada-be59-c258785a63bb-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "4fc8f318-4f05-4ada-be59-c258785a63bb" (UID: "4fc8f318-4f05-4ada-be59-c258785a63bb"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:15:54 crc kubenswrapper[4783]: I1002 11:15:54.746033 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4fc8f318-4f05-4ada-be59-c258785a63bb-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "4fc8f318-4f05-4ada-be59-c258785a63bb" (UID: "4fc8f318-4f05-4ada-be59-c258785a63bb"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:15:54 crc kubenswrapper[4783]: I1002 11:15:54.771009 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kq6pk\" (UniqueName: \"kubernetes.io/projected/4fc8f318-4f05-4ada-be59-c258785a63bb-kube-api-access-kq6pk\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:54 crc kubenswrapper[4783]: I1002 11:15:54.771034 4783 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/4fc8f318-4f05-4ada-be59-c258785a63bb-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:54 crc kubenswrapper[4783]: I1002 11:15:54.771042 4783 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/4fc8f318-4f05-4ada-be59-c258785a63bb-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:54 crc kubenswrapper[4783]: I1002 11:15:54.771050 4783 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/4fc8f318-4f05-4ada-be59-c258785a63bb-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:54 crc kubenswrapper[4783]: I1002 11:15:54.771058 4783 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4fc8f318-4f05-4ada-be59-c258785a63bb-config\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:54 crc kubenswrapper[4783]: I1002 11:15:54.771066 4783 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/4fc8f318-4f05-4ada-be59-c258785a63bb-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 02 11:15:55 crc kubenswrapper[4783]: I1002 11:15:55.017218 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-567b57d86d-gv6fq" event={"ID":"48c11fb6-76f0-4028-a76f-6f67904bf3aa","Type":"ContainerStarted","Data":"eaaf5ac1ab8de4cd11862bbd9387b781b9c059890b204852bc3fb6e13f34a239"} Oct 02 11:15:55 crc kubenswrapper[4783]: I1002 11:15:55.019295 4783 generic.go:334] "Generic (PLEG): container finished" podID="9340097f-c1f0-4fd6-81eb-155ebf4a319d" containerID="3c43d0f89df3267dbde8f25a2198937a6e0c20d55dfad6ff3b5986245e37c978" 
exitCode=0 Oct 02 11:15:55 crc kubenswrapper[4783]: I1002 11:15:55.019372 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5784cf869f-4jjjq" event={"ID":"9340097f-c1f0-4fd6-81eb-155ebf4a319d","Type":"ContainerDied","Data":"3c43d0f89df3267dbde8f25a2198937a6e0c20d55dfad6ff3b5986245e37c978"} Oct 02 11:15:55 crc kubenswrapper[4783]: I1002 11:15:55.021125 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a","Type":"ContainerStarted","Data":"8b3830b590ceb549cf441695774dd8b5fc7c70930ea6c841fc4823221fc745c4"} Oct 02 11:15:55 crc kubenswrapper[4783]: I1002 11:15:55.024564 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-75c8ddd69c-47nl5" Oct 02 11:15:55 crc kubenswrapper[4783]: I1002 11:15:55.024561 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75c8ddd69c-47nl5" event={"ID":"4fc8f318-4f05-4ada-be59-c258785a63bb","Type":"ContainerDied","Data":"db4c5a2fec82b44ac4c3de44a768351e75e0ce690dfd16900181730a4ea65252"} Oct 02 11:15:55 crc kubenswrapper[4783]: I1002 11:15:55.026522 4783 scope.go:117] "RemoveContainer" containerID="f32bade4f25b8974536e9df49cf73283ce470904c5c6166ae585beb911790d62" Oct 02 11:15:55 crc kubenswrapper[4783]: I1002 11:15:55.037873 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6f9ccfdf84-lzmhg" event={"ID":"1b74946e-4754-418a-a8cd-30512a841704","Type":"ContainerStarted","Data":"3cf11c98a87f9759ba0d6875b34c45e1630062d3a871ae3a48da137df6dbd021"} Oct 02 11:15:55 crc kubenswrapper[4783]: I1002 11:15:55.038788 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-6f9ccfdf84-lzmhg" Oct 02 11:15:55 crc kubenswrapper[4783]: I1002 11:15:55.126248 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-6f9ccfdf84-lzmhg" podStartSLOduration=4.1262248 podStartE2EDuration="4.1262248s" podCreationTimestamp="2025-10-02 11:15:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:15:55.096201318 +0000 UTC m=+1388.412395579" watchObservedRunningTime="2025-10-02 11:15:55.1262248 +0000 UTC m=+1388.442419061" Oct 02 11:15:55 crc kubenswrapper[4783]: I1002 11:15:55.308458 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-75c8ddd69c-47nl5"] Oct 02 11:15:55 crc kubenswrapper[4783]: I1002 11:15:55.343048 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-75c8ddd69c-47nl5"] Oct 02 11:15:55 crc kubenswrapper[4783]: I1002 11:15:55.463832 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-77768f4b8b-q72bm" podUID="6f6561e9-9bc7-4cde-adda-c142e6f6eb7a" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.158:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 02 11:15:55 crc kubenswrapper[4783]: I1002 11:15:55.464079 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-77768f4b8b-q72bm" podUID="6f6561e9-9bc7-4cde-adda-c142e6f6eb7a" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.158:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 02 11:15:55 crc kubenswrapper[4783]: I1002 11:15:55.515726 4783 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-77768f4b8b-q72bm" Oct 02 11:15:55 crc kubenswrapper[4783]: I1002 11:15:55.571626 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4fc8f318-4f05-4ada-be59-c258785a63bb" path="/var/lib/kubelet/pods/4fc8f318-4f05-4ada-be59-c258785a63bb/volumes" Oct 02 11:15:55 crc kubenswrapper[4783]: I1002 11:15:55.572313 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-77768f4b8b-q72bm" Oct 02 11:15:56 crc kubenswrapper[4783]: I1002 11:15:56.061955 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5784cf869f-4jjjq" event={"ID":"9340097f-c1f0-4fd6-81eb-155ebf4a319d","Type":"ContainerStarted","Data":"f6821a9aeb15d7f710351a9e77a6842c399fce71c8e916d6156833251456b072"} Oct 02 11:15:56 crc kubenswrapper[4783]: I1002 11:15:56.062490 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-5784cf869f-4jjjq" Oct 02 11:15:56 crc kubenswrapper[4783]: I1002 11:15:56.064658 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"09e5ac64-2f67-4af3-bf5f-9628fead6591","Type":"ContainerStarted","Data":"25187a18e95ed30b9822fb265b8eb95a53fac4497c73ab6aaebcc75064f8846e"} Oct 02 11:15:56 crc kubenswrapper[4783]: I1002 11:15:56.124071 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-5784cf869f-4jjjq" podStartSLOduration=5.12404847 podStartE2EDuration="5.12404847s" podCreationTimestamp="2025-10-02 11:15:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:15:56.115676571 +0000 UTC m=+1389.431870832" watchObservedRunningTime="2025-10-02 11:15:56.12404847 +0000 UTC m=+1389.440242731" Oct 02 11:15:56 crc kubenswrapper[4783]: I1002 11:15:56.638034 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Oct 02 11:15:57 crc kubenswrapper[4783]: I1002 11:15:57.077081 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a","Type":"ContainerStarted","Data":"50f139631a5106fb29104f45876fa168421bf137b794991ad6b9de8905ddaf03"} Oct 02 11:15:57 crc kubenswrapper[4783]: I1002 11:15:57.079845 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"09e5ac64-2f67-4af3-bf5f-9628fead6591","Type":"ContainerStarted","Data":"5a665f5d67500ee7742615b9f967a62e6b1ca0229674a6da9ba38dafd6c7687e"} Oct 02 11:15:57 crc kubenswrapper[4783]: I1002 11:15:57.079975 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="09e5ac64-2f67-4af3-bf5f-9628fead6591" containerName="cinder-api-log" containerID="cri-o://25187a18e95ed30b9822fb265b8eb95a53fac4497c73ab6aaebcc75064f8846e" gracePeriod=30 Oct 02 11:15:57 crc kubenswrapper[4783]: I1002 11:15:57.080081 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="09e5ac64-2f67-4af3-bf5f-9628fead6591" containerName="cinder-api" containerID="cri-o://5a665f5d67500ee7742615b9f967a62e6b1ca0229674a6da9ba38dafd6c7687e" gracePeriod=30 Oct 02 11:15:57 crc kubenswrapper[4783]: I1002 11:15:57.080309 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Oct 02 11:15:57 crc kubenswrapper[4783]: I1002 
11:15:57.100858 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=6.090001958 podStartE2EDuration="7.100830094s" podCreationTimestamp="2025-10-02 11:15:50 +0000 UTC" firstStartedPulling="2025-10-02 11:15:52.478744828 +0000 UTC m=+1385.794939089" lastFinishedPulling="2025-10-02 11:15:53.489572964 +0000 UTC m=+1386.805767225" observedRunningTime="2025-10-02 11:15:57.095006285 +0000 UTC m=+1390.411200546" watchObservedRunningTime="2025-10-02 11:15:57.100830094 +0000 UTC m=+1390.417024355" Oct 02 11:15:57 crc kubenswrapper[4783]: I1002 11:15:57.137459 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=6.137434216 podStartE2EDuration="6.137434216s" podCreationTimestamp="2025-10-02 11:15:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:15:57.132982984 +0000 UTC m=+1390.449177245" watchObservedRunningTime="2025-10-02 11:15:57.137434216 +0000 UTC m=+1390.453628487" Oct 02 11:15:57 crc kubenswrapper[4783]: I1002 11:15:57.761717 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Oct 02 11:15:57 crc kubenswrapper[4783]: I1002 11:15:57.942585 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5bb7dfc68d-7dl59" podUID="9f9ca941-0e76-41d9-b0c0-7a4dada2640b" containerName="barbican-api" probeResult="failure" output="Get \"https://10.217.0.159:9311/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 02 11:15:58 crc kubenswrapper[4783]: I1002 11:15:58.090317 4783 generic.go:334] "Generic (PLEG): container finished" podID="09e5ac64-2f67-4af3-bf5f-9628fead6591" containerID="25187a18e95ed30b9822fb265b8eb95a53fac4497c73ab6aaebcc75064f8846e" exitCode=143 Oct 02 11:15:58 crc kubenswrapper[4783]: I1002 11:15:58.090402 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"09e5ac64-2f67-4af3-bf5f-9628fead6591","Type":"ContainerDied","Data":"25187a18e95ed30b9822fb265b8eb95a53fac4497c73ab6aaebcc75064f8846e"} Oct 02 11:15:58 crc kubenswrapper[4783]: I1002 11:15:58.658598 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-77768f4b8b-q72bm" podUID="6f6561e9-9bc7-4cde-adda-c142e6f6eb7a" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.158:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 02 11:15:58 crc kubenswrapper[4783]: I1002 11:15:58.765665 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-77768f4b8b-q72bm" podUID="6f6561e9-9bc7-4cde-adda-c142e6f6eb7a" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.158:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 02 11:15:59 crc kubenswrapper[4783]: I1002 11:15:59.254563 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5bb7dfc68d-7dl59" podUID="9f9ca941-0e76-41d9-b0c0-7a4dada2640b" containerName="barbican-api-log" probeResult="failure" output="Get \"https://10.217.0.159:9311/healthcheck\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 02 11:15:59 crc kubenswrapper[4783]: I1002 11:15:59.288212 4783 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/neutron-7c59bccbcc-jsxj5"] Oct 02 11:15:59 crc kubenswrapper[4783]: E1002 11:15:59.288611 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4fc8f318-4f05-4ada-be59-c258785a63bb" containerName="init" Oct 02 11:15:59 crc kubenswrapper[4783]: I1002 11:15:59.288630 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="4fc8f318-4f05-4ada-be59-c258785a63bb" containerName="init" Oct 02 11:15:59 crc kubenswrapper[4783]: I1002 11:15:59.288827 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="4fc8f318-4f05-4ada-be59-c258785a63bb" containerName="init" Oct 02 11:15:59 crc kubenswrapper[4783]: I1002 11:15:59.289717 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7c59bccbcc-jsxj5" Oct 02 11:15:59 crc kubenswrapper[4783]: I1002 11:15:59.296023 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Oct 02 11:15:59 crc kubenswrapper[4783]: I1002 11:15:59.296647 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Oct 02 11:15:59 crc kubenswrapper[4783]: I1002 11:15:59.332810 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7c59bccbcc-jsxj5"] Oct 02 11:15:59 crc kubenswrapper[4783]: I1002 11:15:59.354569 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/a26366ce-bca3-4af1-93ec-73a5d4a67705-httpd-config\") pod \"neutron-7c59bccbcc-jsxj5\" (UID: \"a26366ce-bca3-4af1-93ec-73a5d4a67705\") " pod="openstack/neutron-7c59bccbcc-jsxj5" Oct 02 11:15:59 crc kubenswrapper[4783]: I1002 11:15:59.354627 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a26366ce-bca3-4af1-93ec-73a5d4a67705-ovndb-tls-certs\") pod \"neutron-7c59bccbcc-jsxj5\" (UID: \"a26366ce-bca3-4af1-93ec-73a5d4a67705\") " pod="openstack/neutron-7c59bccbcc-jsxj5" Oct 02 11:15:59 crc kubenswrapper[4783]: I1002 11:15:59.354669 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bnpz5\" (UniqueName: \"kubernetes.io/projected/a26366ce-bca3-4af1-93ec-73a5d4a67705-kube-api-access-bnpz5\") pod \"neutron-7c59bccbcc-jsxj5\" (UID: \"a26366ce-bca3-4af1-93ec-73a5d4a67705\") " pod="openstack/neutron-7c59bccbcc-jsxj5" Oct 02 11:15:59 crc kubenswrapper[4783]: I1002 11:15:59.354706 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a26366ce-bca3-4af1-93ec-73a5d4a67705-public-tls-certs\") pod \"neutron-7c59bccbcc-jsxj5\" (UID: \"a26366ce-bca3-4af1-93ec-73a5d4a67705\") " pod="openstack/neutron-7c59bccbcc-jsxj5" Oct 02 11:15:59 crc kubenswrapper[4783]: I1002 11:15:59.354754 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a26366ce-bca3-4af1-93ec-73a5d4a67705-combined-ca-bundle\") pod \"neutron-7c59bccbcc-jsxj5\" (UID: \"a26366ce-bca3-4af1-93ec-73a5d4a67705\") " pod="openstack/neutron-7c59bccbcc-jsxj5" Oct 02 11:15:59 crc kubenswrapper[4783]: I1002 11:15:59.354789 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/a26366ce-bca3-4af1-93ec-73a5d4a67705-internal-tls-certs\") pod \"neutron-7c59bccbcc-jsxj5\" (UID: \"a26366ce-bca3-4af1-93ec-73a5d4a67705\") " pod="openstack/neutron-7c59bccbcc-jsxj5" Oct 02 11:15:59 crc kubenswrapper[4783]: I1002 11:15:59.354848 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/a26366ce-bca3-4af1-93ec-73a5d4a67705-config\") pod \"neutron-7c59bccbcc-jsxj5\" (UID: \"a26366ce-bca3-4af1-93ec-73a5d4a67705\") " pod="openstack/neutron-7c59bccbcc-jsxj5" Oct 02 11:15:59 crc kubenswrapper[4783]: I1002 11:15:59.456622 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/a26366ce-bca3-4af1-93ec-73a5d4a67705-config\") pod \"neutron-7c59bccbcc-jsxj5\" (UID: \"a26366ce-bca3-4af1-93ec-73a5d4a67705\") " pod="openstack/neutron-7c59bccbcc-jsxj5" Oct 02 11:15:59 crc kubenswrapper[4783]: I1002 11:15:59.456760 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/a26366ce-bca3-4af1-93ec-73a5d4a67705-httpd-config\") pod \"neutron-7c59bccbcc-jsxj5\" (UID: \"a26366ce-bca3-4af1-93ec-73a5d4a67705\") " pod="openstack/neutron-7c59bccbcc-jsxj5" Oct 02 11:15:59 crc kubenswrapper[4783]: I1002 11:15:59.456784 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a26366ce-bca3-4af1-93ec-73a5d4a67705-ovndb-tls-certs\") pod \"neutron-7c59bccbcc-jsxj5\" (UID: \"a26366ce-bca3-4af1-93ec-73a5d4a67705\") " pod="openstack/neutron-7c59bccbcc-jsxj5" Oct 02 11:15:59 crc kubenswrapper[4783]: I1002 11:15:59.456813 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bnpz5\" (UniqueName: \"kubernetes.io/projected/a26366ce-bca3-4af1-93ec-73a5d4a67705-kube-api-access-bnpz5\") pod \"neutron-7c59bccbcc-jsxj5\" (UID: \"a26366ce-bca3-4af1-93ec-73a5d4a67705\") " pod="openstack/neutron-7c59bccbcc-jsxj5" Oct 02 11:15:59 crc kubenswrapper[4783]: I1002 11:15:59.456851 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a26366ce-bca3-4af1-93ec-73a5d4a67705-public-tls-certs\") pod \"neutron-7c59bccbcc-jsxj5\" (UID: \"a26366ce-bca3-4af1-93ec-73a5d4a67705\") " pod="openstack/neutron-7c59bccbcc-jsxj5" Oct 02 11:15:59 crc kubenswrapper[4783]: I1002 11:15:59.456896 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a26366ce-bca3-4af1-93ec-73a5d4a67705-combined-ca-bundle\") pod \"neutron-7c59bccbcc-jsxj5\" (UID: \"a26366ce-bca3-4af1-93ec-73a5d4a67705\") " pod="openstack/neutron-7c59bccbcc-jsxj5" Oct 02 11:15:59 crc kubenswrapper[4783]: I1002 11:15:59.456931 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a26366ce-bca3-4af1-93ec-73a5d4a67705-internal-tls-certs\") pod \"neutron-7c59bccbcc-jsxj5\" (UID: \"a26366ce-bca3-4af1-93ec-73a5d4a67705\") " pod="openstack/neutron-7c59bccbcc-jsxj5" Oct 02 11:15:59 crc kubenswrapper[4783]: I1002 11:15:59.463226 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/a26366ce-bca3-4af1-93ec-73a5d4a67705-public-tls-certs\") pod \"neutron-7c59bccbcc-jsxj5\" (UID: 
\"a26366ce-bca3-4af1-93ec-73a5d4a67705\") " pod="openstack/neutron-7c59bccbcc-jsxj5" Oct 02 11:15:59 crc kubenswrapper[4783]: I1002 11:15:59.463313 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/a26366ce-bca3-4af1-93ec-73a5d4a67705-ovndb-tls-certs\") pod \"neutron-7c59bccbcc-jsxj5\" (UID: \"a26366ce-bca3-4af1-93ec-73a5d4a67705\") " pod="openstack/neutron-7c59bccbcc-jsxj5" Oct 02 11:15:59 crc kubenswrapper[4783]: I1002 11:15:59.464811 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/a26366ce-bca3-4af1-93ec-73a5d4a67705-httpd-config\") pod \"neutron-7c59bccbcc-jsxj5\" (UID: \"a26366ce-bca3-4af1-93ec-73a5d4a67705\") " pod="openstack/neutron-7c59bccbcc-jsxj5" Oct 02 11:15:59 crc kubenswrapper[4783]: I1002 11:15:59.465999 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/a26366ce-bca3-4af1-93ec-73a5d4a67705-internal-tls-certs\") pod \"neutron-7c59bccbcc-jsxj5\" (UID: \"a26366ce-bca3-4af1-93ec-73a5d4a67705\") " pod="openstack/neutron-7c59bccbcc-jsxj5" Oct 02 11:15:59 crc kubenswrapper[4783]: I1002 11:15:59.471943 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a26366ce-bca3-4af1-93ec-73a5d4a67705-combined-ca-bundle\") pod \"neutron-7c59bccbcc-jsxj5\" (UID: \"a26366ce-bca3-4af1-93ec-73a5d4a67705\") " pod="openstack/neutron-7c59bccbcc-jsxj5" Oct 02 11:15:59 crc kubenswrapper[4783]: I1002 11:15:59.484114 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bnpz5\" (UniqueName: \"kubernetes.io/projected/a26366ce-bca3-4af1-93ec-73a5d4a67705-kube-api-access-bnpz5\") pod \"neutron-7c59bccbcc-jsxj5\" (UID: \"a26366ce-bca3-4af1-93ec-73a5d4a67705\") " pod="openstack/neutron-7c59bccbcc-jsxj5" Oct 02 11:15:59 crc kubenswrapper[4783]: I1002 11:15:59.487281 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/a26366ce-bca3-4af1-93ec-73a5d4a67705-config\") pod \"neutron-7c59bccbcc-jsxj5\" (UID: \"a26366ce-bca3-4af1-93ec-73a5d4a67705\") " pod="openstack/neutron-7c59bccbcc-jsxj5" Oct 02 11:15:59 crc kubenswrapper[4783]: I1002 11:15:59.619016 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-7c59bccbcc-jsxj5" Oct 02 11:16:00 crc kubenswrapper[4783]: I1002 11:16:00.370458 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7c59bccbcc-jsxj5"] Oct 02 11:16:00 crc kubenswrapper[4783]: I1002 11:16:00.592161 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-5bb7dfc68d-7dl59" Oct 02 11:16:00 crc kubenswrapper[4783]: I1002 11:16:00.668347 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-77768f4b8b-q72bm"] Oct 02 11:16:00 crc kubenswrapper[4783]: I1002 11:16:00.668918 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-77768f4b8b-q72bm" podUID="6f6561e9-9bc7-4cde-adda-c142e6f6eb7a" containerName="barbican-api-log" containerID="cri-o://42dbf0d885c2f45bc7c336d620f72044895639ebf18513403d8d9842f3988e6b" gracePeriod=30 Oct 02 11:16:00 crc kubenswrapper[4783]: I1002 11:16:00.669167 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-77768f4b8b-q72bm" podUID="6f6561e9-9bc7-4cde-adda-c142e6f6eb7a" containerName="barbican-api" containerID="cri-o://8589ca3751f97966b896bc74baed6713e4a70d9e4ad200e250db9c7cf485015a" gracePeriod=30 Oct 02 11:16:01 crc kubenswrapper[4783]: I1002 11:16:01.122814 4783 generic.go:334] "Generic (PLEG): container finished" podID="6f6561e9-9bc7-4cde-adda-c142e6f6eb7a" containerID="42dbf0d885c2f45bc7c336d620f72044895639ebf18513403d8d9842f3988e6b" exitCode=143 Oct 02 11:16:01 crc kubenswrapper[4783]: I1002 11:16:01.123099 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-77768f4b8b-q72bm" event={"ID":"6f6561e9-9bc7-4cde-adda-c142e6f6eb7a","Type":"ContainerDied","Data":"42dbf0d885c2f45bc7c336d620f72044895639ebf18513403d8d9842f3988e6b"} Oct 02 11:16:01 crc kubenswrapper[4783]: I1002 11:16:01.126721 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7c59bccbcc-jsxj5" event={"ID":"a26366ce-bca3-4af1-93ec-73a5d4a67705","Type":"ContainerStarted","Data":"3c612854e071be1647d111713e830004cf9c4f2f5f2162cae9a73e4cf8440a7c"} Oct 02 11:16:01 crc kubenswrapper[4783]: I1002 11:16:01.126767 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7c59bccbcc-jsxj5" event={"ID":"a26366ce-bca3-4af1-93ec-73a5d4a67705","Type":"ContainerStarted","Data":"7cef662811c8ac35979952933e6013cd24f2724acd10750cd97e188283e1c1d1"} Oct 02 11:16:01 crc kubenswrapper[4783]: I1002 11:16:01.500576 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Oct 02 11:16:01 crc kubenswrapper[4783]: I1002 11:16:01.807810 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/cinder-scheduler-0" podUID="d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a" containerName="cinder-scheduler" probeResult="failure" output="HTTP probe failed with statuscode: 500" Oct 02 11:16:02 crc kubenswrapper[4783]: I1002 11:16:02.065589 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-5784cf869f-4jjjq" Oct 02 11:16:02 crc kubenswrapper[4783]: I1002 11:16:02.139801 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-59d5ff467f-2c72r"] Oct 02 11:16:02 crc kubenswrapper[4783]: I1002 11:16:02.140087 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-59d5ff467f-2c72r" podUID="149e251f-f012-4120-ac20-bbd76bf0fcd3" 
containerName="dnsmasq-dns" containerID="cri-o://2e02aba1069f0d41138fe2d1b33978eebda85e3776bf4211569fb60b061ac0d2" gracePeriod=10 Oct 02 11:16:02 crc kubenswrapper[4783]: I1002 11:16:02.189453 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7c59bccbcc-jsxj5" event={"ID":"a26366ce-bca3-4af1-93ec-73a5d4a67705","Type":"ContainerStarted","Data":"43fa503c6100805269a975360207a8c34d5a642511277a2172f92125df940f48"} Oct 02 11:16:02 crc kubenswrapper[4783]: I1002 11:16:02.190603 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-7c59bccbcc-jsxj5" Oct 02 11:16:02 crc kubenswrapper[4783]: I1002 11:16:02.228262 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-7c59bccbcc-jsxj5" podStartSLOduration=3.228236651 podStartE2EDuration="3.228236651s" podCreationTimestamp="2025-10-02 11:15:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:16:02.21723985 +0000 UTC m=+1395.533434111" watchObservedRunningTime="2025-10-02 11:16:02.228236651 +0000 UTC m=+1395.544430922" Oct 02 11:16:02 crc kubenswrapper[4783]: I1002 11:16:02.596785 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-6c6c7fffd4-bbhdp" Oct 02 11:16:02 crc kubenswrapper[4783]: I1002 11:16:02.836853 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-59d5ff467f-2c72r" Oct 02 11:16:02 crc kubenswrapper[4783]: I1002 11:16:02.940222 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/149e251f-f012-4120-ac20-bbd76bf0fcd3-dns-swift-storage-0\") pod \"149e251f-f012-4120-ac20-bbd76bf0fcd3\" (UID: \"149e251f-f012-4120-ac20-bbd76bf0fcd3\") " Oct 02 11:16:02 crc kubenswrapper[4783]: I1002 11:16:02.940354 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/149e251f-f012-4120-ac20-bbd76bf0fcd3-dns-svc\") pod \"149e251f-f012-4120-ac20-bbd76bf0fcd3\" (UID: \"149e251f-f012-4120-ac20-bbd76bf0fcd3\") " Oct 02 11:16:02 crc kubenswrapper[4783]: I1002 11:16:02.940535 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/149e251f-f012-4120-ac20-bbd76bf0fcd3-config\") pod \"149e251f-f012-4120-ac20-bbd76bf0fcd3\" (UID: \"149e251f-f012-4120-ac20-bbd76bf0fcd3\") " Oct 02 11:16:02 crc kubenswrapper[4783]: I1002 11:16:02.940572 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/149e251f-f012-4120-ac20-bbd76bf0fcd3-ovsdbserver-nb\") pod \"149e251f-f012-4120-ac20-bbd76bf0fcd3\" (UID: \"149e251f-f012-4120-ac20-bbd76bf0fcd3\") " Oct 02 11:16:02 crc kubenswrapper[4783]: I1002 11:16:02.940601 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2lnrb\" (UniqueName: \"kubernetes.io/projected/149e251f-f012-4120-ac20-bbd76bf0fcd3-kube-api-access-2lnrb\") pod \"149e251f-f012-4120-ac20-bbd76bf0fcd3\" (UID: \"149e251f-f012-4120-ac20-bbd76bf0fcd3\") " Oct 02 11:16:02 crc kubenswrapper[4783]: I1002 11:16:02.940714 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/149e251f-f012-4120-ac20-bbd76bf0fcd3-ovsdbserver-sb\") pod \"149e251f-f012-4120-ac20-bbd76bf0fcd3\" (UID: \"149e251f-f012-4120-ac20-bbd76bf0fcd3\") " Oct 02 11:16:02 crc kubenswrapper[4783]: I1002 11:16:02.954252 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/149e251f-f012-4120-ac20-bbd76bf0fcd3-kube-api-access-2lnrb" (OuterVolumeSpecName: "kube-api-access-2lnrb") pod "149e251f-f012-4120-ac20-bbd76bf0fcd3" (UID: "149e251f-f012-4120-ac20-bbd76bf0fcd3"). InnerVolumeSpecName "kube-api-access-2lnrb". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:16:03 crc kubenswrapper[4783]: I1002 11:16:03.037868 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/149e251f-f012-4120-ac20-bbd76bf0fcd3-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "149e251f-f012-4120-ac20-bbd76bf0fcd3" (UID: "149e251f-f012-4120-ac20-bbd76bf0fcd3"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:16:03 crc kubenswrapper[4783]: I1002 11:16:03.044628 4783 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/149e251f-f012-4120-ac20-bbd76bf0fcd3-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 02 11:16:03 crc kubenswrapper[4783]: I1002 11:16:03.044654 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2lnrb\" (UniqueName: \"kubernetes.io/projected/149e251f-f012-4120-ac20-bbd76bf0fcd3-kube-api-access-2lnrb\") on node \"crc\" DevicePath \"\"" Oct 02 11:16:03 crc kubenswrapper[4783]: I1002 11:16:03.054673 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/149e251f-f012-4120-ac20-bbd76bf0fcd3-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "149e251f-f012-4120-ac20-bbd76bf0fcd3" (UID: "149e251f-f012-4120-ac20-bbd76bf0fcd3"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:16:03 crc kubenswrapper[4783]: I1002 11:16:03.054715 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/149e251f-f012-4120-ac20-bbd76bf0fcd3-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "149e251f-f012-4120-ac20-bbd76bf0fcd3" (UID: "149e251f-f012-4120-ac20-bbd76bf0fcd3"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:16:03 crc kubenswrapper[4783]: I1002 11:16:03.069862 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/149e251f-f012-4120-ac20-bbd76bf0fcd3-config" (OuterVolumeSpecName: "config") pod "149e251f-f012-4120-ac20-bbd76bf0fcd3" (UID: "149e251f-f012-4120-ac20-bbd76bf0fcd3"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:16:03 crc kubenswrapper[4783]: I1002 11:16:03.114872 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-567b57d86d-gv6fq" Oct 02 11:16:03 crc kubenswrapper[4783]: I1002 11:16:03.114900 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/149e251f-f012-4120-ac20-bbd76bf0fcd3-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "149e251f-f012-4120-ac20-bbd76bf0fcd3" (UID: "149e251f-f012-4120-ac20-bbd76bf0fcd3"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:16:03 crc kubenswrapper[4783]: I1002 11:16:03.114919 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-567b57d86d-gv6fq" Oct 02 11:16:03 crc kubenswrapper[4783]: I1002 11:16:03.117552 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-567b57d86d-gv6fq" podUID="48c11fb6-76f0-4028-a76f-6f67904bf3aa" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.144:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.144:8443: connect: connection refused" Oct 02 11:16:03 crc kubenswrapper[4783]: W1002 11:16:03.123662 4783 watcher.go:93] Error while processing event ("/sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod09e5ac64_2f67_4af3_bf5f_9628fead6591.slice/crio-conmon-25187a18e95ed30b9822fb265b8eb95a53fac4497c73ab6aaebcc75064f8846e.scope": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch /sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod09e5ac64_2f67_4af3_bf5f_9628fead6591.slice/crio-conmon-25187a18e95ed30b9822fb265b8eb95a53fac4497c73ab6aaebcc75064f8846e.scope: no such file or directory Oct 02 11:16:03 crc kubenswrapper[4783]: W1002 11:16:03.123730 4783 watcher.go:93] Error while processing event ("/sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod09e5ac64_2f67_4af3_bf5f_9628fead6591.slice/crio-25187a18e95ed30b9822fb265b8eb95a53fac4497c73ab6aaebcc75064f8846e.scope": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch /sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod09e5ac64_2f67_4af3_bf5f_9628fead6591.slice/crio-25187a18e95ed30b9822fb265b8eb95a53fac4497c73ab6aaebcc75064f8846e.scope: no such file or directory Oct 02 11:16:03 crc kubenswrapper[4783]: W1002 11:16:03.125512 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4fc8f318_4f05_4ada_be59_c258785a63bb.slice/crio-f32bade4f25b8974536e9df49cf73283ce470904c5c6166ae585beb911790d62.scope WatchSource:0}: Error finding container f32bade4f25b8974536e9df49cf73283ce470904c5c6166ae585beb911790d62: Status 404 returned error can't find the container with id f32bade4f25b8974536e9df49cf73283ce470904c5c6166ae585beb911790d62 Oct 02 11:16:03 crc kubenswrapper[4783]: I1002 11:16:03.146142 4783 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/149e251f-f012-4120-ac20-bbd76bf0fcd3-config\") on node \"crc\" DevicePath \"\"" Oct 02 11:16:03 crc kubenswrapper[4783]: I1002 11:16:03.146181 4783 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/149e251f-f012-4120-ac20-bbd76bf0fcd3-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 02 11:16:03 crc kubenswrapper[4783]: I1002 11:16:03.146196 4783 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/149e251f-f012-4120-ac20-bbd76bf0fcd3-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 02 11:16:03 crc kubenswrapper[4783]: I1002 11:16:03.146208 4783 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/149e251f-f012-4120-ac20-bbd76bf0fcd3-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Oct 02 11:16:03 crc kubenswrapper[4783]: I1002 11:16:03.199370 4783 generic.go:334] "Generic (PLEG): container finished" 
podID="149e251f-f012-4120-ac20-bbd76bf0fcd3" containerID="2e02aba1069f0d41138fe2d1b33978eebda85e3776bf4211569fb60b061ac0d2" exitCode=0 Oct 02 11:16:03 crc kubenswrapper[4783]: I1002 11:16:03.200605 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-59d5ff467f-2c72r" Oct 02 11:16:03 crc kubenswrapper[4783]: I1002 11:16:03.204915 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59d5ff467f-2c72r" event={"ID":"149e251f-f012-4120-ac20-bbd76bf0fcd3","Type":"ContainerDied","Data":"2e02aba1069f0d41138fe2d1b33978eebda85e3776bf4211569fb60b061ac0d2"} Oct 02 11:16:03 crc kubenswrapper[4783]: I1002 11:16:03.204986 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59d5ff467f-2c72r" event={"ID":"149e251f-f012-4120-ac20-bbd76bf0fcd3","Type":"ContainerDied","Data":"3cbbc3bc4598e0085effb3513ed5ba3df96286607dc628b8674e2c535c79745d"} Oct 02 11:16:03 crc kubenswrapper[4783]: I1002 11:16:03.205014 4783 scope.go:117] "RemoveContainer" containerID="2e02aba1069f0d41138fe2d1b33978eebda85e3776bf4211569fb60b061ac0d2" Oct 02 11:16:03 crc kubenswrapper[4783]: I1002 11:16:03.237333 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-59d5ff467f-2c72r"] Oct 02 11:16:03 crc kubenswrapper[4783]: I1002 11:16:03.263619 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-59d5ff467f-2c72r"] Oct 02 11:16:03 crc kubenswrapper[4783]: I1002 11:16:03.316925 4783 scope.go:117] "RemoveContainer" containerID="4dd8b26313e4702d7147a6819041044facdeabeb2d5be3a9c158386fbac04db4" Oct 02 11:16:03 crc kubenswrapper[4783]: I1002 11:16:03.346377 4783 scope.go:117] "RemoveContainer" containerID="2e02aba1069f0d41138fe2d1b33978eebda85e3776bf4211569fb60b061ac0d2" Oct 02 11:16:03 crc kubenswrapper[4783]: E1002 11:16:03.346766 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2e02aba1069f0d41138fe2d1b33978eebda85e3776bf4211569fb60b061ac0d2\": container with ID starting with 2e02aba1069f0d41138fe2d1b33978eebda85e3776bf4211569fb60b061ac0d2 not found: ID does not exist" containerID="2e02aba1069f0d41138fe2d1b33978eebda85e3776bf4211569fb60b061ac0d2" Oct 02 11:16:03 crc kubenswrapper[4783]: I1002 11:16:03.346793 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2e02aba1069f0d41138fe2d1b33978eebda85e3776bf4211569fb60b061ac0d2"} err="failed to get container status \"2e02aba1069f0d41138fe2d1b33978eebda85e3776bf4211569fb60b061ac0d2\": rpc error: code = NotFound desc = could not find container \"2e02aba1069f0d41138fe2d1b33978eebda85e3776bf4211569fb60b061ac0d2\": container with ID starting with 2e02aba1069f0d41138fe2d1b33978eebda85e3776bf4211569fb60b061ac0d2 not found: ID does not exist" Oct 02 11:16:03 crc kubenswrapper[4783]: I1002 11:16:03.346814 4783 scope.go:117] "RemoveContainer" containerID="4dd8b26313e4702d7147a6819041044facdeabeb2d5be3a9c158386fbac04db4" Oct 02 11:16:03 crc kubenswrapper[4783]: E1002 11:16:03.348683 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4dd8b26313e4702d7147a6819041044facdeabeb2d5be3a9c158386fbac04db4\": container with ID starting with 4dd8b26313e4702d7147a6819041044facdeabeb2d5be3a9c158386fbac04db4 not found: ID does not exist" containerID="4dd8b26313e4702d7147a6819041044facdeabeb2d5be3a9c158386fbac04db4" Oct 02 11:16:03 crc 
kubenswrapper[4783]: I1002 11:16:03.348815 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4dd8b26313e4702d7147a6819041044facdeabeb2d5be3a9c158386fbac04db4"} err="failed to get container status \"4dd8b26313e4702d7147a6819041044facdeabeb2d5be3a9c158386fbac04db4\": rpc error: code = NotFound desc = could not find container \"4dd8b26313e4702d7147a6819041044facdeabeb2d5be3a9c158386fbac04db4\": container with ID starting with 4dd8b26313e4702d7147a6819041044facdeabeb2d5be3a9c158386fbac04db4 not found: ID does not exist" Oct 02 11:16:03 crc kubenswrapper[4783]: I1002 11:16:03.563776 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="149e251f-f012-4120-ac20-bbd76bf0fcd3" path="/var/lib/kubelet/pods/149e251f-f012-4120-ac20-bbd76bf0fcd3/volumes" Oct 02 11:16:04 crc kubenswrapper[4783]: I1002 11:16:04.215904 4783 generic.go:334] "Generic (PLEG): container finished" podID="6f6561e9-9bc7-4cde-adda-c142e6f6eb7a" containerID="8589ca3751f97966b896bc74baed6713e4a70d9e4ad200e250db9c7cf485015a" exitCode=0 Oct 02 11:16:04 crc kubenswrapper[4783]: I1002 11:16:04.216003 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-77768f4b8b-q72bm" event={"ID":"6f6561e9-9bc7-4cde-adda-c142e6f6eb7a","Type":"ContainerDied","Data":"8589ca3751f97966b896bc74baed6713e4a70d9e4ad200e250db9c7cf485015a"} Oct 02 11:16:04 crc kubenswrapper[4783]: I1002 11:16:04.231589 4783 generic.go:334] "Generic (PLEG): container finished" podID="8abc2e1e-de94-4880-8a75-0c7ee0a2cdba" containerID="17dc5053f7eb1f95b75a44af44ad2579d939fa2adfc83af523f7bb53acc883e3" exitCode=137 Oct 02 11:16:04 crc kubenswrapper[4783]: I1002 11:16:04.231634 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5fcdf587dd-wvthh" event={"ID":"8abc2e1e-de94-4880-8a75-0c7ee0a2cdba","Type":"ContainerDied","Data":"17dc5053f7eb1f95b75a44af44ad2579d939fa2adfc83af523f7bb53acc883e3"} Oct 02 11:16:04 crc kubenswrapper[4783]: I1002 11:16:04.231688 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5fcdf587dd-wvthh" event={"ID":"8abc2e1e-de94-4880-8a75-0c7ee0a2cdba","Type":"ContainerStarted","Data":"9a79bf128cd748cac66115e7e4373946c6413ab941b85a301fa5e56d361363ac"} Oct 02 11:16:04 crc kubenswrapper[4783]: I1002 11:16:04.915251 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-77768f4b8b-q72bm" Oct 02 11:16:04 crc kubenswrapper[4783]: I1002 11:16:04.988546 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f6561e9-9bc7-4cde-adda-c142e6f6eb7a-combined-ca-bundle\") pod \"6f6561e9-9bc7-4cde-adda-c142e6f6eb7a\" (UID: \"6f6561e9-9bc7-4cde-adda-c142e6f6eb7a\") " Oct 02 11:16:04 crc kubenswrapper[4783]: I1002 11:16:04.988595 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6f6561e9-9bc7-4cde-adda-c142e6f6eb7a-logs\") pod \"6f6561e9-9bc7-4cde-adda-c142e6f6eb7a\" (UID: \"6f6561e9-9bc7-4cde-adda-c142e6f6eb7a\") " Oct 02 11:16:04 crc kubenswrapper[4783]: I1002 11:16:04.988624 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zf5pj\" (UniqueName: \"kubernetes.io/projected/6f6561e9-9bc7-4cde-adda-c142e6f6eb7a-kube-api-access-zf5pj\") pod \"6f6561e9-9bc7-4cde-adda-c142e6f6eb7a\" (UID: \"6f6561e9-9bc7-4cde-adda-c142e6f6eb7a\") " Oct 02 11:16:04 crc kubenswrapper[4783]: I1002 11:16:04.988679 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f6561e9-9bc7-4cde-adda-c142e6f6eb7a-config-data\") pod \"6f6561e9-9bc7-4cde-adda-c142e6f6eb7a\" (UID: \"6f6561e9-9bc7-4cde-adda-c142e6f6eb7a\") " Oct 02 11:16:04 crc kubenswrapper[4783]: I1002 11:16:04.988743 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6f6561e9-9bc7-4cde-adda-c142e6f6eb7a-config-data-custom\") pod \"6f6561e9-9bc7-4cde-adda-c142e6f6eb7a\" (UID: \"6f6561e9-9bc7-4cde-adda-c142e6f6eb7a\") " Oct 02 11:16:04 crc kubenswrapper[4783]: I1002 11:16:04.989617 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6f6561e9-9bc7-4cde-adda-c142e6f6eb7a-logs" (OuterVolumeSpecName: "logs") pod "6f6561e9-9bc7-4cde-adda-c142e6f6eb7a" (UID: "6f6561e9-9bc7-4cde-adda-c142e6f6eb7a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:16:05 crc kubenswrapper[4783]: I1002 11:16:05.007574 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6f6561e9-9bc7-4cde-adda-c142e6f6eb7a-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "6f6561e9-9bc7-4cde-adda-c142e6f6eb7a" (UID: "6f6561e9-9bc7-4cde-adda-c142e6f6eb7a"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:16:05 crc kubenswrapper[4783]: I1002 11:16:05.031092 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6f6561e9-9bc7-4cde-adda-c142e6f6eb7a-kube-api-access-zf5pj" (OuterVolumeSpecName: "kube-api-access-zf5pj") pod "6f6561e9-9bc7-4cde-adda-c142e6f6eb7a" (UID: "6f6561e9-9bc7-4cde-adda-c142e6f6eb7a"). InnerVolumeSpecName "kube-api-access-zf5pj". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:16:05 crc kubenswrapper[4783]: I1002 11:16:05.085869 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6f6561e9-9bc7-4cde-adda-c142e6f6eb7a-config-data" (OuterVolumeSpecName: "config-data") pod "6f6561e9-9bc7-4cde-adda-c142e6f6eb7a" (UID: "6f6561e9-9bc7-4cde-adda-c142e6f6eb7a"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:16:05 crc kubenswrapper[4783]: I1002 11:16:05.094755 4783 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6f6561e9-9bc7-4cde-adda-c142e6f6eb7a-logs\") on node \"crc\" DevicePath \"\"" Oct 02 11:16:05 crc kubenswrapper[4783]: I1002 11:16:05.094799 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zf5pj\" (UniqueName: \"kubernetes.io/projected/6f6561e9-9bc7-4cde-adda-c142e6f6eb7a-kube-api-access-zf5pj\") on node \"crc\" DevicePath \"\"" Oct 02 11:16:05 crc kubenswrapper[4783]: I1002 11:16:05.094815 4783 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6f6561e9-9bc7-4cde-adda-c142e6f6eb7a-config-data\") on node \"crc\" DevicePath \"\"" Oct 02 11:16:05 crc kubenswrapper[4783]: I1002 11:16:05.094826 4783 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6f6561e9-9bc7-4cde-adda-c142e6f6eb7a-config-data-custom\") on node \"crc\" DevicePath \"\"" Oct 02 11:16:05 crc kubenswrapper[4783]: I1002 11:16:05.107441 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6f6561e9-9bc7-4cde-adda-c142e6f6eb7a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6f6561e9-9bc7-4cde-adda-c142e6f6eb7a" (UID: "6f6561e9-9bc7-4cde-adda-c142e6f6eb7a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:16:05 crc kubenswrapper[4783]: I1002 11:16:05.196615 4783 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6f6561e9-9bc7-4cde-adda-c142e6f6eb7a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 11:16:05 crc kubenswrapper[4783]: I1002 11:16:05.242026 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-77768f4b8b-q72bm" event={"ID":"6f6561e9-9bc7-4cde-adda-c142e6f6eb7a","Type":"ContainerDied","Data":"9a4faa254df25a00ca295c5e54d6fbab32b5f7ffb325f9338b45d83d8ebf2004"} Oct 02 11:16:05 crc kubenswrapper[4783]: I1002 11:16:05.242066 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-77768f4b8b-q72bm" Oct 02 11:16:05 crc kubenswrapper[4783]: I1002 11:16:05.242073 4783 scope.go:117] "RemoveContainer" containerID="8589ca3751f97966b896bc74baed6713e4a70d9e4ad200e250db9c7cf485015a" Oct 02 11:16:05 crc kubenswrapper[4783]: I1002 11:16:05.269992 4783 scope.go:117] "RemoveContainer" containerID="42dbf0d885c2f45bc7c336d620f72044895639ebf18513403d8d9842f3988e6b" Oct 02 11:16:05 crc kubenswrapper[4783]: I1002 11:16:05.303961 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-77768f4b8b-q72bm"] Oct 02 11:16:05 crc kubenswrapper[4783]: I1002 11:16:05.312967 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-77768f4b8b-q72bm"] Oct 02 11:16:05 crc kubenswrapper[4783]: I1002 11:16:05.557274 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6f6561e9-9bc7-4cde-adda-c142e6f6eb7a" path="/var/lib/kubelet/pods/6f6561e9-9bc7-4cde-adda-c142e6f6eb7a/volumes" Oct 02 11:16:05 crc kubenswrapper[4783]: I1002 11:16:05.842002 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-6dd865cb48-8hqrj" Oct 02 11:16:05 crc kubenswrapper[4783]: I1002 11:16:05.845731 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-6dd865cb48-8hqrj" Oct 02 11:16:06 crc kubenswrapper[4783]: I1002 11:16:06.517677 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Oct 02 11:16:06 crc kubenswrapper[4783]: I1002 11:16:06.585281 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Oct 02 11:16:06 crc kubenswrapper[4783]: I1002 11:16:06.994115 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0" Oct 02 11:16:07 crc kubenswrapper[4783]: I1002 11:16:07.194516 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Oct 02 11:16:07 crc kubenswrapper[4783]: E1002 11:16:07.194883 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="149e251f-f012-4120-ac20-bbd76bf0fcd3" containerName="dnsmasq-dns" Oct 02 11:16:07 crc kubenswrapper[4783]: I1002 11:16:07.194898 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="149e251f-f012-4120-ac20-bbd76bf0fcd3" containerName="dnsmasq-dns" Oct 02 11:16:07 crc kubenswrapper[4783]: E1002 11:16:07.194932 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="149e251f-f012-4120-ac20-bbd76bf0fcd3" containerName="init" Oct 02 11:16:07 crc kubenswrapper[4783]: I1002 11:16:07.194938 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="149e251f-f012-4120-ac20-bbd76bf0fcd3" containerName="init" Oct 02 11:16:07 crc kubenswrapper[4783]: E1002 11:16:07.194947 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f6561e9-9bc7-4cde-adda-c142e6f6eb7a" containerName="barbican-api" Oct 02 11:16:07 crc kubenswrapper[4783]: I1002 11:16:07.194953 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f6561e9-9bc7-4cde-adda-c142e6f6eb7a" containerName="barbican-api" Oct 02 11:16:07 crc kubenswrapper[4783]: E1002 11:16:07.194970 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6f6561e9-9bc7-4cde-adda-c142e6f6eb7a" containerName="barbican-api-log" Oct 02 11:16:07 crc kubenswrapper[4783]: I1002 11:16:07.194976 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="6f6561e9-9bc7-4cde-adda-c142e6f6eb7a" 
containerName="barbican-api-log" Oct 02 11:16:07 crc kubenswrapper[4783]: I1002 11:16:07.195158 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="149e251f-f012-4120-ac20-bbd76bf0fcd3" containerName="dnsmasq-dns" Oct 02 11:16:07 crc kubenswrapper[4783]: I1002 11:16:07.195174 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="6f6561e9-9bc7-4cde-adda-c142e6f6eb7a" containerName="barbican-api-log" Oct 02 11:16:07 crc kubenswrapper[4783]: I1002 11:16:07.195191 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="6f6561e9-9bc7-4cde-adda-c142e6f6eb7a" containerName="barbican-api" Oct 02 11:16:07 crc kubenswrapper[4783]: I1002 11:16:07.195812 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Oct 02 11:16:07 crc kubenswrapper[4783]: I1002 11:16:07.203023 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Oct 02 11:16:07 crc kubenswrapper[4783]: I1002 11:16:07.203909 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-96qnw" Oct 02 11:16:07 crc kubenswrapper[4783]: I1002 11:16:07.207392 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Oct 02 11:16:07 crc kubenswrapper[4783]: I1002 11:16:07.225756 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Oct 02 11:16:07 crc kubenswrapper[4783]: I1002 11:16:07.236484 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/14cd84ac-799d-4243-ba1c-3d4ff4e110cc-openstack-config-secret\") pod \"openstackclient\" (UID: \"14cd84ac-799d-4243-ba1c-3d4ff4e110cc\") " pod="openstack/openstackclient" Oct 02 11:16:07 crc kubenswrapper[4783]: I1002 11:16:07.236540 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8bmsz\" (UniqueName: \"kubernetes.io/projected/14cd84ac-799d-4243-ba1c-3d4ff4e110cc-kube-api-access-8bmsz\") pod \"openstackclient\" (UID: \"14cd84ac-799d-4243-ba1c-3d4ff4e110cc\") " pod="openstack/openstackclient" Oct 02 11:16:07 crc kubenswrapper[4783]: I1002 11:16:07.236586 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/14cd84ac-799d-4243-ba1c-3d4ff4e110cc-openstack-config\") pod \"openstackclient\" (UID: \"14cd84ac-799d-4243-ba1c-3d4ff4e110cc\") " pod="openstack/openstackclient" Oct 02 11:16:07 crc kubenswrapper[4783]: I1002 11:16:07.236647 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14cd84ac-799d-4243-ba1c-3d4ff4e110cc-combined-ca-bundle\") pod \"openstackclient\" (UID: \"14cd84ac-799d-4243-ba1c-3d4ff4e110cc\") " pod="openstack/openstackclient" Oct 02 11:16:07 crc kubenswrapper[4783]: I1002 11:16:07.278268 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a" containerName="cinder-scheduler" containerID="cri-o://8b3830b590ceb549cf441695774dd8b5fc7c70930ea6c841fc4823221fc745c4" gracePeriod=30 Oct 02 11:16:07 crc kubenswrapper[4783]: I1002 11:16:07.278650 4783 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/cinder-scheduler-0" podUID="d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a" containerName="probe" containerID="cri-o://50f139631a5106fb29104f45876fa168421bf137b794991ad6b9de8905ddaf03" gracePeriod=30 Oct 02 11:16:07 crc kubenswrapper[4783]: I1002 11:16:07.338557 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8bmsz\" (UniqueName: \"kubernetes.io/projected/14cd84ac-799d-4243-ba1c-3d4ff4e110cc-kube-api-access-8bmsz\") pod \"openstackclient\" (UID: \"14cd84ac-799d-4243-ba1c-3d4ff4e110cc\") " pod="openstack/openstackclient" Oct 02 11:16:07 crc kubenswrapper[4783]: I1002 11:16:07.338657 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/14cd84ac-799d-4243-ba1c-3d4ff4e110cc-openstack-config\") pod \"openstackclient\" (UID: \"14cd84ac-799d-4243-ba1c-3d4ff4e110cc\") " pod="openstack/openstackclient" Oct 02 11:16:07 crc kubenswrapper[4783]: I1002 11:16:07.338737 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14cd84ac-799d-4243-ba1c-3d4ff4e110cc-combined-ca-bundle\") pod \"openstackclient\" (UID: \"14cd84ac-799d-4243-ba1c-3d4ff4e110cc\") " pod="openstack/openstackclient" Oct 02 11:16:07 crc kubenswrapper[4783]: I1002 11:16:07.338805 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/14cd84ac-799d-4243-ba1c-3d4ff4e110cc-openstack-config-secret\") pod \"openstackclient\" (UID: \"14cd84ac-799d-4243-ba1c-3d4ff4e110cc\") " pod="openstack/openstackclient" Oct 02 11:16:07 crc kubenswrapper[4783]: I1002 11:16:07.341209 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/14cd84ac-799d-4243-ba1c-3d4ff4e110cc-openstack-config\") pod \"openstackclient\" (UID: \"14cd84ac-799d-4243-ba1c-3d4ff4e110cc\") " pod="openstack/openstackclient" Oct 02 11:16:07 crc kubenswrapper[4783]: I1002 11:16:07.348103 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/14cd84ac-799d-4243-ba1c-3d4ff4e110cc-openstack-config-secret\") pod \"openstackclient\" (UID: \"14cd84ac-799d-4243-ba1c-3d4ff4e110cc\") " pod="openstack/openstackclient" Oct 02 11:16:07 crc kubenswrapper[4783]: I1002 11:16:07.348832 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/14cd84ac-799d-4243-ba1c-3d4ff4e110cc-combined-ca-bundle\") pod \"openstackclient\" (UID: \"14cd84ac-799d-4243-ba1c-3d4ff4e110cc\") " pod="openstack/openstackclient" Oct 02 11:16:07 crc kubenswrapper[4783]: I1002 11:16:07.377282 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8bmsz\" (UniqueName: \"kubernetes.io/projected/14cd84ac-799d-4243-ba1c-3d4ff4e110cc-kube-api-access-8bmsz\") pod \"openstackclient\" (UID: \"14cd84ac-799d-4243-ba1c-3d4ff4e110cc\") " pod="openstack/openstackclient" Oct 02 11:16:07 crc kubenswrapper[4783]: I1002 11:16:07.538991 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Oct 02 11:16:08 crc kubenswrapper[4783]: I1002 11:16:08.104087 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Oct 02 11:16:08 crc kubenswrapper[4783]: I1002 11:16:08.298272 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"14cd84ac-799d-4243-ba1c-3d4ff4e110cc","Type":"ContainerStarted","Data":"bb67fa068a64453d904e2314d065df680b7073518a70eba1722c7343af5c7792"} Oct 02 11:16:09 crc kubenswrapper[4783]: I1002 11:16:09.311238 4783 generic.go:334] "Generic (PLEG): container finished" podID="d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a" containerID="50f139631a5106fb29104f45876fa168421bf137b794991ad6b9de8905ddaf03" exitCode=0 Oct 02 11:16:09 crc kubenswrapper[4783]: I1002 11:16:09.311304 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a","Type":"ContainerDied","Data":"50f139631a5106fb29104f45876fa168421bf137b794991ad6b9de8905ddaf03"} Oct 02 11:16:10 crc kubenswrapper[4783]: I1002 11:16:10.327157 4783 generic.go:334] "Generic (PLEG): container finished" podID="d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a" containerID="8b3830b590ceb549cf441695774dd8b5fc7c70930ea6c841fc4823221fc745c4" exitCode=0 Oct 02 11:16:10 crc kubenswrapper[4783]: I1002 11:16:10.327512 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a","Type":"ContainerDied","Data":"8b3830b590ceb549cf441695774dd8b5fc7c70930ea6c841fc4823221fc745c4"} Oct 02 11:16:10 crc kubenswrapper[4783]: I1002 11:16:10.591580 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Oct 02 11:16:10 crc kubenswrapper[4783]: I1002 11:16:10.731088 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a-combined-ca-bundle\") pod \"d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a\" (UID: \"d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a\") " Oct 02 11:16:10 crc kubenswrapper[4783]: I1002 11:16:10.731564 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a-config-data-custom\") pod \"d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a\" (UID: \"d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a\") " Oct 02 11:16:10 crc kubenswrapper[4783]: I1002 11:16:10.731681 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a-config-data\") pod \"d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a\" (UID: \"d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a\") " Oct 02 11:16:10 crc kubenswrapper[4783]: I1002 11:16:10.731773 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2mxkr\" (UniqueName: \"kubernetes.io/projected/d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a-kube-api-access-2mxkr\") pod \"d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a\" (UID: \"d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a\") " Oct 02 11:16:10 crc kubenswrapper[4783]: I1002 11:16:10.731965 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a-scripts\") pod \"d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a\" (UID: 
\"d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a\") " Oct 02 11:16:10 crc kubenswrapper[4783]: I1002 11:16:10.732122 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a-etc-machine-id\") pod \"d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a\" (UID: \"d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a\") " Oct 02 11:16:10 crc kubenswrapper[4783]: I1002 11:16:10.732785 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a" (UID: "d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 02 11:16:10 crc kubenswrapper[4783]: I1002 11:16:10.740212 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a-kube-api-access-2mxkr" (OuterVolumeSpecName: "kube-api-access-2mxkr") pod "d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a" (UID: "d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a"). InnerVolumeSpecName "kube-api-access-2mxkr". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:16:10 crc kubenswrapper[4783]: I1002 11:16:10.744610 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a" (UID: "d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:16:10 crc kubenswrapper[4783]: I1002 11:16:10.752915 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a-scripts" (OuterVolumeSpecName: "scripts") pod "d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a" (UID: "d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:16:10 crc kubenswrapper[4783]: I1002 11:16:10.802699 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a" (UID: "d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:16:10 crc kubenswrapper[4783]: I1002 11:16:10.836017 4783 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a-etc-machine-id\") on node \"crc\" DevicePath \"\"" Oct 02 11:16:10 crc kubenswrapper[4783]: I1002 11:16:10.836052 4783 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 11:16:10 crc kubenswrapper[4783]: I1002 11:16:10.836064 4783 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a-config-data-custom\") on node \"crc\" DevicePath \"\"" Oct 02 11:16:10 crc kubenswrapper[4783]: I1002 11:16:10.836077 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2mxkr\" (UniqueName: \"kubernetes.io/projected/d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a-kube-api-access-2mxkr\") on node \"crc\" DevicePath \"\"" Oct 02 11:16:10 crc kubenswrapper[4783]: I1002 11:16:10.836089 4783 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a-scripts\") on node \"crc\" DevicePath \"\"" Oct 02 11:16:10 crc kubenswrapper[4783]: I1002 11:16:10.929706 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a-config-data" (OuterVolumeSpecName: "config-data") pod "d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a" (UID: "d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:16:10 crc kubenswrapper[4783]: I1002 11:16:10.937396 4783 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a-config-data\") on node \"crc\" DevicePath \"\"" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.343235 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a","Type":"ContainerDied","Data":"9d09ded9de9dccbd3674ce785e5846361f2dd0649c7a423ca883da4f72059af1"} Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.343297 4783 scope.go:117] "RemoveContainer" containerID="50f139631a5106fb29104f45876fa168421bf137b794991ad6b9de8905ddaf03" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.343583 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.403611 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.439330 4783 scope.go:117] "RemoveContainer" containerID="8b3830b590ceb549cf441695774dd8b5fc7c70930ea6c841fc4823221fc745c4" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.447204 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.479286 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Oct 02 11:16:11 crc kubenswrapper[4783]: E1002 11:16:11.479932 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a" containerName="probe" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.479962 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a" containerName="probe" Oct 02 11:16:11 crc kubenswrapper[4783]: E1002 11:16:11.480013 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a" containerName="cinder-scheduler" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.480022 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a" containerName="cinder-scheduler" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.480256 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a" containerName="probe" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.480302 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a" containerName="cinder-scheduler" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.481617 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.490937 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.511953 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.555162 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b2e9930a-1339-45f4-9a48-4b4cba57c5b5-scripts\") pod \"cinder-scheduler-0\" (UID: \"b2e9930a-1339-45f4-9a48-4b4cba57c5b5\") " pod="openstack/cinder-scheduler-0" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.555399 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2e9930a-1339-45f4-9a48-4b4cba57c5b5-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"b2e9930a-1339-45f4-9a48-4b4cba57c5b5\") " pod="openstack/cinder-scheduler-0" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.555480 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b2e9930a-1339-45f4-9a48-4b4cba57c5b5-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"b2e9930a-1339-45f4-9a48-4b4cba57c5b5\") " pod="openstack/cinder-scheduler-0" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.555551 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2e9930a-1339-45f4-9a48-4b4cba57c5b5-config-data\") pod \"cinder-scheduler-0\" (UID: \"b2e9930a-1339-45f4-9a48-4b4cba57c5b5\") " pod="openstack/cinder-scheduler-0" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.555649 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b2e9930a-1339-45f4-9a48-4b4cba57c5b5-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"b2e9930a-1339-45f4-9a48-4b4cba57c5b5\") " pod="openstack/cinder-scheduler-0" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.555759 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gcc5r\" (UniqueName: \"kubernetes.io/projected/b2e9930a-1339-45f4-9a48-4b4cba57c5b5-kube-api-access-gcc5r\") pod \"cinder-scheduler-0\" (UID: \"b2e9930a-1339-45f4-9a48-4b4cba57c5b5\") " pod="openstack/cinder-scheduler-0" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.570035 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a" path="/var/lib/kubelet/pods/d39c74b0-272d-4a58-b7ba-f5e12a7d8b1a/volumes" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.657470 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b2e9930a-1339-45f4-9a48-4b4cba57c5b5-scripts\") pod \"cinder-scheduler-0\" (UID: \"b2e9930a-1339-45f4-9a48-4b4cba57c5b5\") " pod="openstack/cinder-scheduler-0" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.657586 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/b2e9930a-1339-45f4-9a48-4b4cba57c5b5-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"b2e9930a-1339-45f4-9a48-4b4cba57c5b5\") " pod="openstack/cinder-scheduler-0" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.657624 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b2e9930a-1339-45f4-9a48-4b4cba57c5b5-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"b2e9930a-1339-45f4-9a48-4b4cba57c5b5\") " pod="openstack/cinder-scheduler-0" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.657663 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2e9930a-1339-45f4-9a48-4b4cba57c5b5-config-data\") pod \"cinder-scheduler-0\" (UID: \"b2e9930a-1339-45f4-9a48-4b4cba57c5b5\") " pod="openstack/cinder-scheduler-0" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.657714 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b2e9930a-1339-45f4-9a48-4b4cba57c5b5-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"b2e9930a-1339-45f4-9a48-4b4cba57c5b5\") " pod="openstack/cinder-scheduler-0" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.657931 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gcc5r\" (UniqueName: \"kubernetes.io/projected/b2e9930a-1339-45f4-9a48-4b4cba57c5b5-kube-api-access-gcc5r\") pod \"cinder-scheduler-0\" (UID: \"b2e9930a-1339-45f4-9a48-4b4cba57c5b5\") " pod="openstack/cinder-scheduler-0" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.658308 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/b2e9930a-1339-45f4-9a48-4b4cba57c5b5-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"b2e9930a-1339-45f4-9a48-4b4cba57c5b5\") " pod="openstack/cinder-scheduler-0" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.670690 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b2e9930a-1339-45f4-9a48-4b4cba57c5b5-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"b2e9930a-1339-45f4-9a48-4b4cba57c5b5\") " pod="openstack/cinder-scheduler-0" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.673166 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b2e9930a-1339-45f4-9a48-4b4cba57c5b5-scripts\") pod \"cinder-scheduler-0\" (UID: \"b2e9930a-1339-45f4-9a48-4b4cba57c5b5\") " pod="openstack/cinder-scheduler-0" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.685892 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-7ddcb47dc5-2vzlt"] Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.687745 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-7ddcb47dc5-2vzlt" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.690767 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b2e9930a-1339-45f4-9a48-4b4cba57c5b5-config-data\") pod \"cinder-scheduler-0\" (UID: \"b2e9930a-1339-45f4-9a48-4b4cba57c5b5\") " pod="openstack/cinder-scheduler-0" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.693060 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/b2e9930a-1339-45f4-9a48-4b4cba57c5b5-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"b2e9930a-1339-45f4-9a48-4b4cba57c5b5\") " pod="openstack/cinder-scheduler-0" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.693495 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.693808 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.693957 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.715715 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gcc5r\" (UniqueName: \"kubernetes.io/projected/b2e9930a-1339-45f4-9a48-4b4cba57c5b5-kube-api-access-gcc5r\") pod \"cinder-scheduler-0\" (UID: \"b2e9930a-1339-45f4-9a48-4b4cba57c5b5\") " pod="openstack/cinder-scheduler-0" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.720386 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-7ddcb47dc5-2vzlt"] Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.759368 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/06ff3ffc-66e4-48b6-a386-0cc72c47f104-config-data\") pod \"swift-proxy-7ddcb47dc5-2vzlt\" (UID: \"06ff3ffc-66e4-48b6-a386-0cc72c47f104\") " pod="openstack/swift-proxy-7ddcb47dc5-2vzlt" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.759496 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06ff3ffc-66e4-48b6-a386-0cc72c47f104-combined-ca-bundle\") pod \"swift-proxy-7ddcb47dc5-2vzlt\" (UID: \"06ff3ffc-66e4-48b6-a386-0cc72c47f104\") " pod="openstack/swift-proxy-7ddcb47dc5-2vzlt" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.759574 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/06ff3ffc-66e4-48b6-a386-0cc72c47f104-etc-swift\") pod \"swift-proxy-7ddcb47dc5-2vzlt\" (UID: \"06ff3ffc-66e4-48b6-a386-0cc72c47f104\") " pod="openstack/swift-proxy-7ddcb47dc5-2vzlt" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.759624 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/06ff3ffc-66e4-48b6-a386-0cc72c47f104-public-tls-certs\") pod \"swift-proxy-7ddcb47dc5-2vzlt\" (UID: \"06ff3ffc-66e4-48b6-a386-0cc72c47f104\") " pod="openstack/swift-proxy-7ddcb47dc5-2vzlt" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.759663 4783 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/06ff3ffc-66e4-48b6-a386-0cc72c47f104-log-httpd\") pod \"swift-proxy-7ddcb47dc5-2vzlt\" (UID: \"06ff3ffc-66e4-48b6-a386-0cc72c47f104\") " pod="openstack/swift-proxy-7ddcb47dc5-2vzlt" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.759686 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xlcwd\" (UniqueName: \"kubernetes.io/projected/06ff3ffc-66e4-48b6-a386-0cc72c47f104-kube-api-access-xlcwd\") pod \"swift-proxy-7ddcb47dc5-2vzlt\" (UID: \"06ff3ffc-66e4-48b6-a386-0cc72c47f104\") " pod="openstack/swift-proxy-7ddcb47dc5-2vzlt" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.759724 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/06ff3ffc-66e4-48b6-a386-0cc72c47f104-run-httpd\") pod \"swift-proxy-7ddcb47dc5-2vzlt\" (UID: \"06ff3ffc-66e4-48b6-a386-0cc72c47f104\") " pod="openstack/swift-proxy-7ddcb47dc5-2vzlt" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.759761 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/06ff3ffc-66e4-48b6-a386-0cc72c47f104-internal-tls-certs\") pod \"swift-proxy-7ddcb47dc5-2vzlt\" (UID: \"06ff3ffc-66e4-48b6-a386-0cc72c47f104\") " pod="openstack/swift-proxy-7ddcb47dc5-2vzlt" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.811211 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.862193 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/06ff3ffc-66e4-48b6-a386-0cc72c47f104-config-data\") pod \"swift-proxy-7ddcb47dc5-2vzlt\" (UID: \"06ff3ffc-66e4-48b6-a386-0cc72c47f104\") " pod="openstack/swift-proxy-7ddcb47dc5-2vzlt" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.862299 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06ff3ffc-66e4-48b6-a386-0cc72c47f104-combined-ca-bundle\") pod \"swift-proxy-7ddcb47dc5-2vzlt\" (UID: \"06ff3ffc-66e4-48b6-a386-0cc72c47f104\") " pod="openstack/swift-proxy-7ddcb47dc5-2vzlt" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.862355 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/06ff3ffc-66e4-48b6-a386-0cc72c47f104-etc-swift\") pod \"swift-proxy-7ddcb47dc5-2vzlt\" (UID: \"06ff3ffc-66e4-48b6-a386-0cc72c47f104\") " pod="openstack/swift-proxy-7ddcb47dc5-2vzlt" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.862402 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/06ff3ffc-66e4-48b6-a386-0cc72c47f104-public-tls-certs\") pod \"swift-proxy-7ddcb47dc5-2vzlt\" (UID: \"06ff3ffc-66e4-48b6-a386-0cc72c47f104\") " pod="openstack/swift-proxy-7ddcb47dc5-2vzlt" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.862974 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/06ff3ffc-66e4-48b6-a386-0cc72c47f104-log-httpd\") pod \"swift-proxy-7ddcb47dc5-2vzlt\" (UID: 
\"06ff3ffc-66e4-48b6-a386-0cc72c47f104\") " pod="openstack/swift-proxy-7ddcb47dc5-2vzlt" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.862998 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xlcwd\" (UniqueName: \"kubernetes.io/projected/06ff3ffc-66e4-48b6-a386-0cc72c47f104-kube-api-access-xlcwd\") pod \"swift-proxy-7ddcb47dc5-2vzlt\" (UID: \"06ff3ffc-66e4-48b6-a386-0cc72c47f104\") " pod="openstack/swift-proxy-7ddcb47dc5-2vzlt" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.863042 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/06ff3ffc-66e4-48b6-a386-0cc72c47f104-run-httpd\") pod \"swift-proxy-7ddcb47dc5-2vzlt\" (UID: \"06ff3ffc-66e4-48b6-a386-0cc72c47f104\") " pod="openstack/swift-proxy-7ddcb47dc5-2vzlt" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.863537 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/06ff3ffc-66e4-48b6-a386-0cc72c47f104-log-httpd\") pod \"swift-proxy-7ddcb47dc5-2vzlt\" (UID: \"06ff3ffc-66e4-48b6-a386-0cc72c47f104\") " pod="openstack/swift-proxy-7ddcb47dc5-2vzlt" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.863557 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/06ff3ffc-66e4-48b6-a386-0cc72c47f104-run-httpd\") pod \"swift-proxy-7ddcb47dc5-2vzlt\" (UID: \"06ff3ffc-66e4-48b6-a386-0cc72c47f104\") " pod="openstack/swift-proxy-7ddcb47dc5-2vzlt" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.863626 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/06ff3ffc-66e4-48b6-a386-0cc72c47f104-internal-tls-certs\") pod \"swift-proxy-7ddcb47dc5-2vzlt\" (UID: \"06ff3ffc-66e4-48b6-a386-0cc72c47f104\") " pod="openstack/swift-proxy-7ddcb47dc5-2vzlt" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.872077 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/06ff3ffc-66e4-48b6-a386-0cc72c47f104-config-data\") pod \"swift-proxy-7ddcb47dc5-2vzlt\" (UID: \"06ff3ffc-66e4-48b6-a386-0cc72c47f104\") " pod="openstack/swift-proxy-7ddcb47dc5-2vzlt" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.881002 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/06ff3ffc-66e4-48b6-a386-0cc72c47f104-public-tls-certs\") pod \"swift-proxy-7ddcb47dc5-2vzlt\" (UID: \"06ff3ffc-66e4-48b6-a386-0cc72c47f104\") " pod="openstack/swift-proxy-7ddcb47dc5-2vzlt" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.882612 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/06ff3ffc-66e4-48b6-a386-0cc72c47f104-internal-tls-certs\") pod \"swift-proxy-7ddcb47dc5-2vzlt\" (UID: \"06ff3ffc-66e4-48b6-a386-0cc72c47f104\") " pod="openstack/swift-proxy-7ddcb47dc5-2vzlt" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.886401 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/06ff3ffc-66e4-48b6-a386-0cc72c47f104-combined-ca-bundle\") pod \"swift-proxy-7ddcb47dc5-2vzlt\" (UID: \"06ff3ffc-66e4-48b6-a386-0cc72c47f104\") " pod="openstack/swift-proxy-7ddcb47dc5-2vzlt" Oct 02 11:16:11 crc 
kubenswrapper[4783]: I1002 11:16:11.889759 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/06ff3ffc-66e4-48b6-a386-0cc72c47f104-etc-swift\") pod \"swift-proxy-7ddcb47dc5-2vzlt\" (UID: \"06ff3ffc-66e4-48b6-a386-0cc72c47f104\") " pod="openstack/swift-proxy-7ddcb47dc5-2vzlt" Oct 02 11:16:11 crc kubenswrapper[4783]: I1002 11:16:11.890309 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xlcwd\" (UniqueName: \"kubernetes.io/projected/06ff3ffc-66e4-48b6-a386-0cc72c47f104-kube-api-access-xlcwd\") pod \"swift-proxy-7ddcb47dc5-2vzlt\" (UID: \"06ff3ffc-66e4-48b6-a386-0cc72c47f104\") " pod="openstack/swift-proxy-7ddcb47dc5-2vzlt" Oct 02 11:16:12 crc kubenswrapper[4783]: I1002 11:16:12.127368 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-7ddcb47dc5-2vzlt" Oct 02 11:16:12 crc kubenswrapper[4783]: I1002 11:16:12.421104 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Oct 02 11:16:12 crc kubenswrapper[4783]: I1002 11:16:12.898823 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-7ddcb47dc5-2vzlt"] Oct 02 11:16:12 crc kubenswrapper[4783]: I1002 11:16:12.984220 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-5fcdf587dd-wvthh" Oct 02 11:16:12 crc kubenswrapper[4783]: I1002 11:16:12.984964 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-5fcdf587dd-wvthh" Oct 02 11:16:13 crc kubenswrapper[4783]: I1002 11:16:13.117375 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-567b57d86d-gv6fq" podUID="48c11fb6-76f0-4028-a76f-6f67904bf3aa" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.144:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.144:8443: connect: connection refused" Oct 02 11:16:13 crc kubenswrapper[4783]: I1002 11:16:13.384488 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-7ddcb47dc5-2vzlt" event={"ID":"06ff3ffc-66e4-48b6-a386-0cc72c47f104","Type":"ContainerStarted","Data":"e2661c6fb4f5d05041ace45380a2d97e6d9de9da6f33482549d3fbcef8120d63"} Oct 02 11:16:13 crc kubenswrapper[4783]: I1002 11:16:13.395559 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b2e9930a-1339-45f4-9a48-4b4cba57c5b5","Type":"ContainerStarted","Data":"da4fe1b263399806d029a482bfe3e446dfa6afd9e12cd072084bdde2cdc1101f"} Oct 02 11:16:14 crc kubenswrapper[4783]: I1002 11:16:14.470785 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b2e9930a-1339-45f4-9a48-4b4cba57c5b5","Type":"ContainerStarted","Data":"0b860407133557136fe8a0412e9c9df9e61d11c9a8d266e5400d8ad7023dee0d"} Oct 02 11:16:14 crc kubenswrapper[4783]: I1002 11:16:14.486667 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-7ddcb47dc5-2vzlt" event={"ID":"06ff3ffc-66e4-48b6-a386-0cc72c47f104","Type":"ContainerStarted","Data":"a94fd67c731bc4243f4dc9d7828b7a1c236ac2cfc8e44d8f3c80a3f6a2f0ed92"} Oct 02 11:16:15 crc kubenswrapper[4783]: I1002 11:16:15.041211 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 02 11:16:15 crc kubenswrapper[4783]: I1002 11:16:15.041823 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" 
podUID="b867ffde-a8a3-493e-b07e-f9c8320417ba" containerName="ceilometer-central-agent" containerID="cri-o://ca63c1a8d935112ad8441ba3f78fb1f2e77ef839da542b263bbf13fc6abeeea0" gracePeriod=30 Oct 02 11:16:15 crc kubenswrapper[4783]: I1002 11:16:15.042233 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b867ffde-a8a3-493e-b07e-f9c8320417ba" containerName="proxy-httpd" containerID="cri-o://e82e9e7db4e91bfaeb0b5cbfafdbe9d0a450f7f11bb50d0b31a39e9ad4cad3da" gracePeriod=30 Oct 02 11:16:15 crc kubenswrapper[4783]: I1002 11:16:15.042299 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b867ffde-a8a3-493e-b07e-f9c8320417ba" containerName="sg-core" containerID="cri-o://25e3634a2d23ed6c0e9b74fba5128b629f194bea54ffe3f4d1078234e4aff9eb" gracePeriod=30 Oct 02 11:16:15 crc kubenswrapper[4783]: I1002 11:16:15.042362 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="b867ffde-a8a3-493e-b07e-f9c8320417ba" containerName="ceilometer-notification-agent" containerID="cri-o://495f2a3bbeb51a2655dad522f20f1f5fa9d89f13a2b7642ad3e5791e05d1cdd4" gracePeriod=30 Oct 02 11:16:15 crc kubenswrapper[4783]: I1002 11:16:15.509840 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-7ddcb47dc5-2vzlt" event={"ID":"06ff3ffc-66e4-48b6-a386-0cc72c47f104","Type":"ContainerStarted","Data":"fcf6562d68008b3f5351a633627d6f481913031b24d032b2aabcdcc5d46a3297"} Oct 02 11:16:15 crc kubenswrapper[4783]: I1002 11:16:15.510170 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-7ddcb47dc5-2vzlt" Oct 02 11:16:15 crc kubenswrapper[4783]: I1002 11:16:15.510183 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-7ddcb47dc5-2vzlt" Oct 02 11:16:15 crc kubenswrapper[4783]: I1002 11:16:15.513881 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"b2e9930a-1339-45f4-9a48-4b4cba57c5b5","Type":"ContainerStarted","Data":"b2a7bbda44bcd7746a9788b7ebd5a8bedc44347ed66ca851e5057246862b4994"} Oct 02 11:16:15 crc kubenswrapper[4783]: I1002 11:16:15.518811 4783 generic.go:334] "Generic (PLEG): container finished" podID="b867ffde-a8a3-493e-b07e-f9c8320417ba" containerID="25e3634a2d23ed6c0e9b74fba5128b629f194bea54ffe3f4d1078234e4aff9eb" exitCode=2 Oct 02 11:16:15 crc kubenswrapper[4783]: I1002 11:16:15.518853 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b867ffde-a8a3-493e-b07e-f9c8320417ba","Type":"ContainerDied","Data":"25e3634a2d23ed6c0e9b74fba5128b629f194bea54ffe3f4d1078234e4aff9eb"} Oct 02 11:16:15 crc kubenswrapper[4783]: I1002 11:16:15.529457 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-7ddcb47dc5-2vzlt" podStartSLOduration=4.529443677 podStartE2EDuration="4.529443677s" podCreationTimestamp="2025-10-02 11:16:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:16:15.527547455 +0000 UTC m=+1408.843741716" watchObservedRunningTime="2025-10-02 11:16:15.529443677 +0000 UTC m=+1408.845637938" Oct 02 11:16:16 crc kubenswrapper[4783]: I1002 11:16:16.532761 4783 generic.go:334] "Generic (PLEG): container finished" podID="b867ffde-a8a3-493e-b07e-f9c8320417ba" 
containerID="e82e9e7db4e91bfaeb0b5cbfafdbe9d0a450f7f11bb50d0b31a39e9ad4cad3da" exitCode=0 Oct 02 11:16:16 crc kubenswrapper[4783]: I1002 11:16:16.532796 4783 generic.go:334] "Generic (PLEG): container finished" podID="b867ffde-a8a3-493e-b07e-f9c8320417ba" containerID="495f2a3bbeb51a2655dad522f20f1f5fa9d89f13a2b7642ad3e5791e05d1cdd4" exitCode=0 Oct 02 11:16:16 crc kubenswrapper[4783]: I1002 11:16:16.532806 4783 generic.go:334] "Generic (PLEG): container finished" podID="b867ffde-a8a3-493e-b07e-f9c8320417ba" containerID="ca63c1a8d935112ad8441ba3f78fb1f2e77ef839da542b263bbf13fc6abeeea0" exitCode=0 Oct 02 11:16:16 crc kubenswrapper[4783]: I1002 11:16:16.533077 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b867ffde-a8a3-493e-b07e-f9c8320417ba","Type":"ContainerDied","Data":"e82e9e7db4e91bfaeb0b5cbfafdbe9d0a450f7f11bb50d0b31a39e9ad4cad3da"} Oct 02 11:16:16 crc kubenswrapper[4783]: I1002 11:16:16.533126 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b867ffde-a8a3-493e-b07e-f9c8320417ba","Type":"ContainerDied","Data":"495f2a3bbeb51a2655dad522f20f1f5fa9d89f13a2b7642ad3e5791e05d1cdd4"} Oct 02 11:16:16 crc kubenswrapper[4783]: I1002 11:16:16.533137 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b867ffde-a8a3-493e-b07e-f9c8320417ba","Type":"ContainerDied","Data":"ca63c1a8d935112ad8441ba3f78fb1f2e77ef839da542b263bbf13fc6abeeea0"} Oct 02 11:16:16 crc kubenswrapper[4783]: I1002 11:16:16.812569 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Oct 02 11:16:17 crc kubenswrapper[4783]: I1002 11:16:17.576518 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=6.576500975 podStartE2EDuration="6.576500975s" podCreationTimestamp="2025-10-02 11:16:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:16:15.548859648 +0000 UTC m=+1408.865053909" watchObservedRunningTime="2025-10-02 11:16:17.576500975 +0000 UTC m=+1410.892695236" Oct 02 11:16:18 crc kubenswrapper[4783]: I1002 11:16:18.137961 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-7ddcb47dc5-2vzlt" podUID="06ff3ffc-66e4-48b6-a386-0cc72c47f104" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Oct 02 11:16:21 crc kubenswrapper[4783]: I1002 11:16:21.758195 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-6f9ccfdf84-lzmhg" Oct 02 11:16:22 crc kubenswrapper[4783]: I1002 11:16:22.146608 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-7ddcb47dc5-2vzlt" Oct 02 11:16:22 crc kubenswrapper[4783]: I1002 11:16:22.147200 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-7ddcb47dc5-2vzlt" Oct 02 11:16:22 crc kubenswrapper[4783]: I1002 11:16:22.312764 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Oct 02 11:16:22 crc kubenswrapper[4783]: I1002 11:16:22.921905 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Oct 02 11:16:22 crc kubenswrapper[4783]: I1002 11:16:22.924586 4783 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/kube-state-metrics-0" podUID="9d4e10e2-7f87-4ffc-9120-fa41d978cb4f" containerName="kube-state-metrics" containerID="cri-o://5519e8bf7ecbf73cd0360fa9494c1cdc2f27d3941645ec0fe3dd75922d9805d0" gracePeriod=30 Oct 02 11:16:22 crc kubenswrapper[4783]: I1002 11:16:22.982163 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-5fcdf587dd-wvthh" podUID="8abc2e1e-de94-4880-8a75-0c7ee0a2cdba" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.143:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.143:8443: connect: connection refused" Oct 02 11:16:23 crc kubenswrapper[4783]: I1002 11:16:23.113677 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-567b57d86d-gv6fq" podUID="48c11fb6-76f0-4028-a76f-6f67904bf3aa" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.144:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.144:8443: connect: connection refused" Oct 02 11:16:23 crc kubenswrapper[4783]: I1002 11:16:23.113786 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-567b57d86d-gv6fq" Oct 02 11:16:23 crc kubenswrapper[4783]: I1002 11:16:23.114954 4783 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="horizon" containerStatusID={"Type":"cri-o","ID":"eaaf5ac1ab8de4cd11862bbd9387b781b9c059890b204852bc3fb6e13f34a239"} pod="openstack/horizon-567b57d86d-gv6fq" containerMessage="Container horizon failed startup probe, will be restarted" Oct 02 11:16:23 crc kubenswrapper[4783]: I1002 11:16:23.115048 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-567b57d86d-gv6fq" podUID="48c11fb6-76f0-4028-a76f-6f67904bf3aa" containerName="horizon" containerID="cri-o://eaaf5ac1ab8de4cd11862bbd9387b781b9c059890b204852bc3fb6e13f34a239" gracePeriod=30 Oct 02 11:16:23 crc kubenswrapper[4783]: I1002 11:16:23.651076 4783 generic.go:334] "Generic (PLEG): container finished" podID="9d4e10e2-7f87-4ffc-9120-fa41d978cb4f" containerID="5519e8bf7ecbf73cd0360fa9494c1cdc2f27d3941645ec0fe3dd75922d9805d0" exitCode=2 Oct 02 11:16:23 crc kubenswrapper[4783]: I1002 11:16:23.651121 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"9d4e10e2-7f87-4ffc-9120-fa41d978cb4f","Type":"ContainerDied","Data":"5519e8bf7ecbf73cd0360fa9494c1cdc2f27d3941645ec0fe3dd75922d9805d0"} Oct 02 11:16:25 crc kubenswrapper[4783]: E1002 11:16:25.985725 4783 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified" Oct 02 11:16:25 crc kubenswrapper[4783]: E1002 11:16:25.986227 4783 kuberuntime_manager.go:1274] "Unhandled Error" err="container 
&Container{Name:openstackclient,Image:quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified,Command:[/bin/sleep],Args:[infinity],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n695h597h575h5b4h589h57fhc8h5f4h5c9h555hb6hfh69hcch5dh5ch5d5h74h8dhd4h575h5d7h585h79h5cbh546h9ch6bh687hb5h654hcbq,ValueFrom:nil,},EnvVar{Name:OS_CLOUD,Value:default,ValueFrom:nil,},EnvVar{Name:PROMETHEUS_HOST,Value:metric-storage-prometheus.openstack.svc,ValueFrom:nil,},EnvVar{Name:PROMETHEUS_PORT,Value:9090,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:openstack-config,ReadOnly:false,MountPath:/home/cloud-admin/.config/openstack/clouds.yaml,SubPath:clouds.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config-secret,ReadOnly:false,MountPath:/home/cloud-admin/.config/openstack/secure.yaml,SubPath:secure.yaml,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:openstack-config-secret,ReadOnly:false,MountPath:/home/cloud-admin/cloudrc,SubPath:cloudrc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-8bmsz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42401,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:*42401,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstackclient_openstack(14cd84ac-799d-4243-ba1c-3d4ff4e110cc): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Oct 02 11:16:25 crc kubenswrapper[4783]: E1002 11:16:25.987650 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openstackclient\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstackclient" podUID="14cd84ac-799d-4243-ba1c-3d4ff4e110cc" Oct 02 11:16:26 crc kubenswrapper[4783]: I1002 11:16:26.438391 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Oct 02 11:16:26 crc kubenswrapper[4783]: I1002 11:16:26.543380 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Oct 02 11:16:26 crc kubenswrapper[4783]: I1002 11:16:26.626910 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-242pt\" (UniqueName: \"kubernetes.io/projected/9d4e10e2-7f87-4ffc-9120-fa41d978cb4f-kube-api-access-242pt\") pod \"9d4e10e2-7f87-4ffc-9120-fa41d978cb4f\" (UID: \"9d4e10e2-7f87-4ffc-9120-fa41d978cb4f\") " Oct 02 11:16:26 crc kubenswrapper[4783]: I1002 11:16:26.638693 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4e10e2-7f87-4ffc-9120-fa41d978cb4f-kube-api-access-242pt" (OuterVolumeSpecName: "kube-api-access-242pt") pod "9d4e10e2-7f87-4ffc-9120-fa41d978cb4f" (UID: "9d4e10e2-7f87-4ffc-9120-fa41d978cb4f"). InnerVolumeSpecName "kube-api-access-242pt". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:16:26 crc kubenswrapper[4783]: I1002 11:16:26.696657 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Oct 02 11:16:26 crc kubenswrapper[4783]: I1002 11:16:26.699502 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"9d4e10e2-7f87-4ffc-9120-fa41d978cb4f","Type":"ContainerDied","Data":"049e5d36d5768c1aca3f4c8aeb249c2b619c572969e74961f4700d2ae2402018"} Oct 02 11:16:26 crc kubenswrapper[4783]: I1002 11:16:26.706537 4783 scope.go:117] "RemoveContainer" containerID="5519e8bf7ecbf73cd0360fa9494c1cdc2f27d3941645ec0fe3dd75922d9805d0" Oct 02 11:16:26 crc kubenswrapper[4783]: I1002 11:16:26.721277 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 02 11:16:26 crc kubenswrapper[4783]: I1002 11:16:26.721761 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"b867ffde-a8a3-493e-b07e-f9c8320417ba","Type":"ContainerDied","Data":"366c70a0dda543d8e6d5cf517d0bdd49034748aac82c143f6b934ac6d27c9927"} Oct 02 11:16:26 crc kubenswrapper[4783]: I1002 11:16:26.728567 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b867ffde-a8a3-493e-b07e-f9c8320417ba-sg-core-conf-yaml\") pod \"b867ffde-a8a3-493e-b07e-f9c8320417ba\" (UID: \"b867ffde-a8a3-493e-b07e-f9c8320417ba\") " Oct 02 11:16:26 crc kubenswrapper[4783]: I1002 11:16:26.728769 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b867ffde-a8a3-493e-b07e-f9c8320417ba-combined-ca-bundle\") pod \"b867ffde-a8a3-493e-b07e-f9c8320417ba\" (UID: \"b867ffde-a8a3-493e-b07e-f9c8320417ba\") " Oct 02 11:16:26 crc kubenswrapper[4783]: I1002 11:16:26.728881 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ftq4z\" (UniqueName: \"kubernetes.io/projected/b867ffde-a8a3-493e-b07e-f9c8320417ba-kube-api-access-ftq4z\") pod \"b867ffde-a8a3-493e-b07e-f9c8320417ba\" (UID: \"b867ffde-a8a3-493e-b07e-f9c8320417ba\") " Oct 02 11:16:26 crc kubenswrapper[4783]: I1002 11:16:26.729048 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b867ffde-a8a3-493e-b07e-f9c8320417ba-log-httpd\") pod \"b867ffde-a8a3-493e-b07e-f9c8320417ba\" (UID: \"b867ffde-a8a3-493e-b07e-f9c8320417ba\") " Oct 02 11:16:26 crc kubenswrapper[4783]: I1002 11:16:26.729139 4783 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b867ffde-a8a3-493e-b07e-f9c8320417ba-scripts\") pod \"b867ffde-a8a3-493e-b07e-f9c8320417ba\" (UID: \"b867ffde-a8a3-493e-b07e-f9c8320417ba\") " Oct 02 11:16:26 crc kubenswrapper[4783]: I1002 11:16:26.729208 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b867ffde-a8a3-493e-b07e-f9c8320417ba-run-httpd\") pod \"b867ffde-a8a3-493e-b07e-f9c8320417ba\" (UID: \"b867ffde-a8a3-493e-b07e-f9c8320417ba\") " Oct 02 11:16:26 crc kubenswrapper[4783]: I1002 11:16:26.729297 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b867ffde-a8a3-493e-b07e-f9c8320417ba-config-data\") pod \"b867ffde-a8a3-493e-b07e-f9c8320417ba\" (UID: \"b867ffde-a8a3-493e-b07e-f9c8320417ba\") " Oct 02 11:16:26 crc kubenswrapper[4783]: I1002 11:16:26.729917 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-242pt\" (UniqueName: \"kubernetes.io/projected/9d4e10e2-7f87-4ffc-9120-fa41d978cb4f-kube-api-access-242pt\") on node \"crc\" DevicePath \"\"" Oct 02 11:16:26 crc kubenswrapper[4783]: I1002 11:16:26.733059 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b867ffde-a8a3-493e-b07e-f9c8320417ba-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "b867ffde-a8a3-493e-b07e-f9c8320417ba" (UID: "b867ffde-a8a3-493e-b07e-f9c8320417ba"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:16:26 crc kubenswrapper[4783]: I1002 11:16:26.743895 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b867ffde-a8a3-493e-b07e-f9c8320417ba-kube-api-access-ftq4z" (OuterVolumeSpecName: "kube-api-access-ftq4z") pod "b867ffde-a8a3-493e-b07e-f9c8320417ba" (UID: "b867ffde-a8a3-493e-b07e-f9c8320417ba"). InnerVolumeSpecName "kube-api-access-ftq4z". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:16:26 crc kubenswrapper[4783]: E1002 11:16:26.744086 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"openstackclient\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-openstackclient:current-podified\\\"\"" pod="openstack/openstackclient" podUID="14cd84ac-799d-4243-ba1c-3d4ff4e110cc" Oct 02 11:16:26 crc kubenswrapper[4783]: I1002 11:16:26.744203 4783 scope.go:117] "RemoveContainer" containerID="e82e9e7db4e91bfaeb0b5cbfafdbe9d0a450f7f11bb50d0b31a39e9ad4cad3da" Oct 02 11:16:26 crc kubenswrapper[4783]: I1002 11:16:26.749571 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b867ffde-a8a3-493e-b07e-f9c8320417ba-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "b867ffde-a8a3-493e-b07e-f9c8320417ba" (UID: "b867ffde-a8a3-493e-b07e-f9c8320417ba"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:16:26 crc kubenswrapper[4783]: I1002 11:16:26.753643 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b867ffde-a8a3-493e-b07e-f9c8320417ba-scripts" (OuterVolumeSpecName: "scripts") pod "b867ffde-a8a3-493e-b07e-f9c8320417ba" (UID: "b867ffde-a8a3-493e-b07e-f9c8320417ba"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:16:26 crc kubenswrapper[4783]: I1002 11:16:26.776723 4783 scope.go:117] "RemoveContainer" containerID="25e3634a2d23ed6c0e9b74fba5128b629f194bea54ffe3f4d1078234e4aff9eb" Oct 02 11:16:26 crc kubenswrapper[4783]: I1002 11:16:26.795588 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Oct 02 11:16:26 crc kubenswrapper[4783]: I1002 11:16:26.822754 4783 scope.go:117] "RemoveContainer" containerID="495f2a3bbeb51a2655dad522f20f1f5fa9d89f13a2b7642ad3e5791e05d1cdd4" Oct 02 11:16:26 crc kubenswrapper[4783]: I1002 11:16:26.827588 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Oct 02 11:16:26 crc kubenswrapper[4783]: I1002 11:16:26.832073 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b867ffde-a8a3-493e-b07e-f9c8320417ba-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "b867ffde-a8a3-493e-b07e-f9c8320417ba" (UID: "b867ffde-a8a3-493e-b07e-f9c8320417ba"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:16:26 crc kubenswrapper[4783]: I1002 11:16:26.832638 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ftq4z\" (UniqueName: \"kubernetes.io/projected/b867ffde-a8a3-493e-b07e-f9c8320417ba-kube-api-access-ftq4z\") on node \"crc\" DevicePath \"\"" Oct 02 11:16:26 crc kubenswrapper[4783]: I1002 11:16:26.832656 4783 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b867ffde-a8a3-493e-b07e-f9c8320417ba-log-httpd\") on node \"crc\" DevicePath \"\"" Oct 02 11:16:26 crc kubenswrapper[4783]: I1002 11:16:26.832665 4783 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/b867ffde-a8a3-493e-b07e-f9c8320417ba-scripts\") on node \"crc\" DevicePath \"\"" Oct 02 11:16:26 crc kubenswrapper[4783]: I1002 11:16:26.832674 4783 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/b867ffde-a8a3-493e-b07e-f9c8320417ba-run-httpd\") on node \"crc\" DevicePath \"\"" Oct 02 11:16:26 crc kubenswrapper[4783]: I1002 11:16:26.832682 4783 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/b867ffde-a8a3-493e-b07e-f9c8320417ba-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Oct 02 11:16:26 crc kubenswrapper[4783]: I1002 11:16:26.840227 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Oct 02 11:16:26 crc kubenswrapper[4783]: E1002 11:16:26.840723 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b867ffde-a8a3-493e-b07e-f9c8320417ba" containerName="proxy-httpd" Oct 02 11:16:26 crc kubenswrapper[4783]: I1002 11:16:26.840741 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="b867ffde-a8a3-493e-b07e-f9c8320417ba" containerName="proxy-httpd" Oct 02 11:16:26 crc kubenswrapper[4783]: E1002 11:16:26.840754 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b867ffde-a8a3-493e-b07e-f9c8320417ba" containerName="ceilometer-notification-agent" Oct 02 11:16:26 crc kubenswrapper[4783]: I1002 11:16:26.840761 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="b867ffde-a8a3-493e-b07e-f9c8320417ba" containerName="ceilometer-notification-agent" Oct 02 11:16:26 crc kubenswrapper[4783]: E1002 11:16:26.840774 4783 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="9d4e10e2-7f87-4ffc-9120-fa41d978cb4f" containerName="kube-state-metrics" Oct 02 11:16:26 crc kubenswrapper[4783]: I1002 11:16:26.840780 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d4e10e2-7f87-4ffc-9120-fa41d978cb4f" containerName="kube-state-metrics" Oct 02 11:16:26 crc kubenswrapper[4783]: E1002 11:16:26.840790 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b867ffde-a8a3-493e-b07e-f9c8320417ba" containerName="ceilometer-central-agent" Oct 02 11:16:26 crc kubenswrapper[4783]: I1002 11:16:26.840796 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="b867ffde-a8a3-493e-b07e-f9c8320417ba" containerName="ceilometer-central-agent" Oct 02 11:16:26 crc kubenswrapper[4783]: E1002 11:16:26.840805 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b867ffde-a8a3-493e-b07e-f9c8320417ba" containerName="sg-core" Oct 02 11:16:26 crc kubenswrapper[4783]: I1002 11:16:26.840810 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="b867ffde-a8a3-493e-b07e-f9c8320417ba" containerName="sg-core" Oct 02 11:16:26 crc kubenswrapper[4783]: I1002 11:16:26.841015 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="b867ffde-a8a3-493e-b07e-f9c8320417ba" containerName="ceilometer-central-agent" Oct 02 11:16:26 crc kubenswrapper[4783]: I1002 11:16:26.841096 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="b867ffde-a8a3-493e-b07e-f9c8320417ba" containerName="sg-core" Oct 02 11:16:26 crc kubenswrapper[4783]: I1002 11:16:26.841119 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d4e10e2-7f87-4ffc-9120-fa41d978cb4f" containerName="kube-state-metrics" Oct 02 11:16:26 crc kubenswrapper[4783]: I1002 11:16:26.841137 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="b867ffde-a8a3-493e-b07e-f9c8320417ba" containerName="proxy-httpd" Oct 02 11:16:26 crc kubenswrapper[4783]: I1002 11:16:26.841194 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="b867ffde-a8a3-493e-b07e-f9c8320417ba" containerName="ceilometer-notification-agent" Oct 02 11:16:26 crc kubenswrapper[4783]: I1002 11:16:26.842039 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Oct 02 11:16:26 crc kubenswrapper[4783]: I1002 11:16:26.845608 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Oct 02 11:16:26 crc kubenswrapper[4783]: I1002 11:16:26.845789 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Oct 02 11:16:26 crc kubenswrapper[4783]: I1002 11:16:26.853807 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Oct 02 11:16:26 crc kubenswrapper[4783]: I1002 11:16:26.870869 4783 scope.go:117] "RemoveContainer" containerID="ca63c1a8d935112ad8441ba3f78fb1f2e77ef839da542b263bbf13fc6abeeea0" Oct 02 11:16:26 crc kubenswrapper[4783]: I1002 11:16:26.911919 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b867ffde-a8a3-493e-b07e-f9c8320417ba-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b867ffde-a8a3-493e-b07e-f9c8320417ba" (UID: "b867ffde-a8a3-493e-b07e-f9c8320417ba"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:16:26 crc kubenswrapper[4783]: I1002 11:16:26.934216 4783 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b867ffde-a8a3-493e-b07e-f9c8320417ba-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 11:16:26 crc kubenswrapper[4783]: I1002 11:16:26.947028 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b867ffde-a8a3-493e-b07e-f9c8320417ba-config-data" (OuterVolumeSpecName: "config-data") pod "b867ffde-a8a3-493e-b07e-f9c8320417ba" (UID: "b867ffde-a8a3-493e-b07e-f9c8320417ba"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:16:27 crc kubenswrapper[4783]: I1002 11:16:27.035839 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31b6e57e-e409-4847-9fd6-86778b935975-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"31b6e57e-e409-4847-9fd6-86778b935975\") " pod="openstack/kube-state-metrics-0" Oct 02 11:16:27 crc kubenswrapper[4783]: I1002 11:16:27.035896 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-24lhf\" (UniqueName: \"kubernetes.io/projected/31b6e57e-e409-4847-9fd6-86778b935975-kube-api-access-24lhf\") pod \"kube-state-metrics-0\" (UID: \"31b6e57e-e409-4847-9fd6-86778b935975\") " pod="openstack/kube-state-metrics-0" Oct 02 11:16:27 crc kubenswrapper[4783]: I1002 11:16:27.036261 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/31b6e57e-e409-4847-9fd6-86778b935975-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"31b6e57e-e409-4847-9fd6-86778b935975\") " pod="openstack/kube-state-metrics-0" Oct 02 11:16:27 crc kubenswrapper[4783]: I1002 11:16:27.036310 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/31b6e57e-e409-4847-9fd6-86778b935975-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"31b6e57e-e409-4847-9fd6-86778b935975\") " pod="openstack/kube-state-metrics-0" Oct 02 11:16:27 crc kubenswrapper[4783]: I1002 11:16:27.036366 4783 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b867ffde-a8a3-493e-b07e-f9c8320417ba-config-data\") on node \"crc\" DevicePath \"\"" Oct 02 11:16:27 crc kubenswrapper[4783]: I1002 11:16:27.055534 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 02 11:16:27 crc kubenswrapper[4783]: I1002 11:16:27.068986 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Oct 02 11:16:27 crc kubenswrapper[4783]: I1002 11:16:27.100921 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Oct 02 11:16:27 crc kubenswrapper[4783]: I1002 11:16:27.104227 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Oct 02 11:16:27 crc kubenswrapper[4783]: I1002 11:16:27.110307 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Oct 02 11:16:27 crc kubenswrapper[4783]: I1002 11:16:27.110734 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Oct 02 11:16:27 crc kubenswrapper[4783]: I1002 11:16:27.111228 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Oct 02 11:16:27 crc kubenswrapper[4783]: I1002 11:16:27.127829 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 02 11:16:27 crc kubenswrapper[4783]: I1002 11:16:27.140533 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/31b6e57e-e409-4847-9fd6-86778b935975-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"31b6e57e-e409-4847-9fd6-86778b935975\") " pod="openstack/kube-state-metrics-0" Oct 02 11:16:27 crc kubenswrapper[4783]: I1002 11:16:27.140615 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/31b6e57e-e409-4847-9fd6-86778b935975-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"31b6e57e-e409-4847-9fd6-86778b935975\") " pod="openstack/kube-state-metrics-0" Oct 02 11:16:27 crc kubenswrapper[4783]: I1002 11:16:27.140661 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31b6e57e-e409-4847-9fd6-86778b935975-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"31b6e57e-e409-4847-9fd6-86778b935975\") " pod="openstack/kube-state-metrics-0" Oct 02 11:16:27 crc kubenswrapper[4783]: I1002 11:16:27.140691 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-24lhf\" (UniqueName: \"kubernetes.io/projected/31b6e57e-e409-4847-9fd6-86778b935975-kube-api-access-24lhf\") pod \"kube-state-metrics-0\" (UID: \"31b6e57e-e409-4847-9fd6-86778b935975\") " pod="openstack/kube-state-metrics-0" Oct 02 11:16:27 crc kubenswrapper[4783]: I1002 11:16:27.146943 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/31b6e57e-e409-4847-9fd6-86778b935975-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"31b6e57e-e409-4847-9fd6-86778b935975\") " pod="openstack/kube-state-metrics-0" Oct 02 11:16:27 crc kubenswrapper[4783]: I1002 11:16:27.160067 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/31b6e57e-e409-4847-9fd6-86778b935975-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"31b6e57e-e409-4847-9fd6-86778b935975\") " pod="openstack/kube-state-metrics-0" Oct 02 11:16:27 crc kubenswrapper[4783]: I1002 11:16:27.163358 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31b6e57e-e409-4847-9fd6-86778b935975-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"31b6e57e-e409-4847-9fd6-86778b935975\") " pod="openstack/kube-state-metrics-0" Oct 02 11:16:27 crc kubenswrapper[4783]: I1002 11:16:27.185253 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"kube-api-access-24lhf\" (UniqueName: \"kubernetes.io/projected/31b6e57e-e409-4847-9fd6-86778b935975-kube-api-access-24lhf\") pod \"kube-state-metrics-0\" (UID: \"31b6e57e-e409-4847-9fd6-86778b935975\") " pod="openstack/kube-state-metrics-0" Oct 02 11:16:27 crc kubenswrapper[4783]: I1002 11:16:27.244817 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bd124073-9953-4fc0-80a0-83df1cf2bb45-run-httpd\") pod \"ceilometer-0\" (UID: \"bd124073-9953-4fc0-80a0-83df1cf2bb45\") " pod="openstack/ceilometer-0" Oct 02 11:16:27 crc kubenswrapper[4783]: I1002 11:16:27.244891 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bd124073-9953-4fc0-80a0-83df1cf2bb45-log-httpd\") pod \"ceilometer-0\" (UID: \"bd124073-9953-4fc0-80a0-83df1cf2bb45\") " pod="openstack/ceilometer-0" Oct 02 11:16:27 crc kubenswrapper[4783]: I1002 11:16:27.244913 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bd124073-9953-4fc0-80a0-83df1cf2bb45-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"bd124073-9953-4fc0-80a0-83df1cf2bb45\") " pod="openstack/ceilometer-0" Oct 02 11:16:27 crc kubenswrapper[4783]: I1002 11:16:27.244972 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd124073-9953-4fc0-80a0-83df1cf2bb45-config-data\") pod \"ceilometer-0\" (UID: \"bd124073-9953-4fc0-80a0-83df1cf2bb45\") " pod="openstack/ceilometer-0" Oct 02 11:16:27 crc kubenswrapper[4783]: I1002 11:16:27.245011 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd124073-9953-4fc0-80a0-83df1cf2bb45-scripts\") pod \"ceilometer-0\" (UID: \"bd124073-9953-4fc0-80a0-83df1cf2bb45\") " pod="openstack/ceilometer-0" Oct 02 11:16:27 crc kubenswrapper[4783]: I1002 11:16:27.245057 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nr47g\" (UniqueName: \"kubernetes.io/projected/bd124073-9953-4fc0-80a0-83df1cf2bb45-kube-api-access-nr47g\") pod \"ceilometer-0\" (UID: \"bd124073-9953-4fc0-80a0-83df1cf2bb45\") " pod="openstack/ceilometer-0" Oct 02 11:16:27 crc kubenswrapper[4783]: I1002 11:16:27.245083 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd124073-9953-4fc0-80a0-83df1cf2bb45-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"bd124073-9953-4fc0-80a0-83df1cf2bb45\") " pod="openstack/ceilometer-0" Oct 02 11:16:27 crc kubenswrapper[4783]: I1002 11:16:27.245148 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/bd124073-9953-4fc0-80a0-83df1cf2bb45-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"bd124073-9953-4fc0-80a0-83df1cf2bb45\") " pod="openstack/ceilometer-0" Oct 02 11:16:27 crc kubenswrapper[4783]: I1002 11:16:27.315171 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Oct 02 11:16:27 crc kubenswrapper[4783]: I1002 11:16:27.347971 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd124073-9953-4fc0-80a0-83df1cf2bb45-config-data\") pod \"ceilometer-0\" (UID: \"bd124073-9953-4fc0-80a0-83df1cf2bb45\") " pod="openstack/ceilometer-0" Oct 02 11:16:27 crc kubenswrapper[4783]: I1002 11:16:27.348054 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd124073-9953-4fc0-80a0-83df1cf2bb45-scripts\") pod \"ceilometer-0\" (UID: \"bd124073-9953-4fc0-80a0-83df1cf2bb45\") " pod="openstack/ceilometer-0" Oct 02 11:16:27 crc kubenswrapper[4783]: I1002 11:16:27.348089 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nr47g\" (UniqueName: \"kubernetes.io/projected/bd124073-9953-4fc0-80a0-83df1cf2bb45-kube-api-access-nr47g\") pod \"ceilometer-0\" (UID: \"bd124073-9953-4fc0-80a0-83df1cf2bb45\") " pod="openstack/ceilometer-0" Oct 02 11:16:27 crc kubenswrapper[4783]: I1002 11:16:27.348114 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd124073-9953-4fc0-80a0-83df1cf2bb45-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"bd124073-9953-4fc0-80a0-83df1cf2bb45\") " pod="openstack/ceilometer-0" Oct 02 11:16:27 crc kubenswrapper[4783]: I1002 11:16:27.348156 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/bd124073-9953-4fc0-80a0-83df1cf2bb45-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"bd124073-9953-4fc0-80a0-83df1cf2bb45\") " pod="openstack/ceilometer-0" Oct 02 11:16:27 crc kubenswrapper[4783]: I1002 11:16:27.348208 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bd124073-9953-4fc0-80a0-83df1cf2bb45-run-httpd\") pod \"ceilometer-0\" (UID: \"bd124073-9953-4fc0-80a0-83df1cf2bb45\") " pod="openstack/ceilometer-0" Oct 02 11:16:27 crc kubenswrapper[4783]: I1002 11:16:27.348235 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bd124073-9953-4fc0-80a0-83df1cf2bb45-log-httpd\") pod \"ceilometer-0\" (UID: \"bd124073-9953-4fc0-80a0-83df1cf2bb45\") " pod="openstack/ceilometer-0" Oct 02 11:16:27 crc kubenswrapper[4783]: I1002 11:16:27.348251 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bd124073-9953-4fc0-80a0-83df1cf2bb45-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"bd124073-9953-4fc0-80a0-83df1cf2bb45\") " pod="openstack/ceilometer-0" Oct 02 11:16:27 crc kubenswrapper[4783]: I1002 11:16:27.352717 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bd124073-9953-4fc0-80a0-83df1cf2bb45-run-httpd\") pod \"ceilometer-0\" (UID: \"bd124073-9953-4fc0-80a0-83df1cf2bb45\") " pod="openstack/ceilometer-0" Oct 02 11:16:27 crc kubenswrapper[4783]: I1002 11:16:27.372430 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd124073-9953-4fc0-80a0-83df1cf2bb45-config-data\") pod \"ceilometer-0\" (UID: \"bd124073-9953-4fc0-80a0-83df1cf2bb45\") " 
pod="openstack/ceilometer-0" Oct 02 11:16:27 crc kubenswrapper[4783]: I1002 11:16:27.372843 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd124073-9953-4fc0-80a0-83df1cf2bb45-scripts\") pod \"ceilometer-0\" (UID: \"bd124073-9953-4fc0-80a0-83df1cf2bb45\") " pod="openstack/ceilometer-0" Oct 02 11:16:27 crc kubenswrapper[4783]: I1002 11:16:27.373039 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bd124073-9953-4fc0-80a0-83df1cf2bb45-log-httpd\") pod \"ceilometer-0\" (UID: \"bd124073-9953-4fc0-80a0-83df1cf2bb45\") " pod="openstack/ceilometer-0" Oct 02 11:16:27 crc kubenswrapper[4783]: I1002 11:16:27.375956 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/bd124073-9953-4fc0-80a0-83df1cf2bb45-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"bd124073-9953-4fc0-80a0-83df1cf2bb45\") " pod="openstack/ceilometer-0" Oct 02 11:16:27 crc kubenswrapper[4783]: I1002 11:16:27.376776 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd124073-9953-4fc0-80a0-83df1cf2bb45-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"bd124073-9953-4fc0-80a0-83df1cf2bb45\") " pod="openstack/ceilometer-0" Oct 02 11:16:27 crc kubenswrapper[4783]: I1002 11:16:27.383352 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nr47g\" (UniqueName: \"kubernetes.io/projected/bd124073-9953-4fc0-80a0-83df1cf2bb45-kube-api-access-nr47g\") pod \"ceilometer-0\" (UID: \"bd124073-9953-4fc0-80a0-83df1cf2bb45\") " pod="openstack/ceilometer-0" Oct 02 11:16:27 crc kubenswrapper[4783]: I1002 11:16:27.396123 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bd124073-9953-4fc0-80a0-83df1cf2bb45-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"bd124073-9953-4fc0-80a0-83df1cf2bb45\") " pod="openstack/ceilometer-0" Oct 02 11:16:27 crc kubenswrapper[4783]: I1002 11:16:27.443405 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Oct 02 11:16:27 crc kubenswrapper[4783]: I1002 11:16:27.613505 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4e10e2-7f87-4ffc-9120-fa41d978cb4f" path="/var/lib/kubelet/pods/9d4e10e2-7f87-4ffc-9120-fa41d978cb4f/volumes" Oct 02 11:16:27 crc kubenswrapper[4783]: I1002 11:16:27.614690 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b867ffde-a8a3-493e-b07e-f9c8320417ba" path="/var/lib/kubelet/pods/b867ffde-a8a3-493e-b07e-f9c8320417ba/volumes" Oct 02 11:16:27 crc kubenswrapper[4783]: W1002 11:16:27.624578 4783 watcher.go:93] Error while processing event ("/sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd39c74b0_272d_4a58_b7ba_f5e12a7d8b1a.slice/crio-conmon-50f139631a5106fb29104f45876fa168421bf137b794991ad6b9de8905ddaf03.scope": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch /sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd39c74b0_272d_4a58_b7ba_f5e12a7d8b1a.slice/crio-conmon-50f139631a5106fb29104f45876fa168421bf137b794991ad6b9de8905ddaf03.scope: no such file or directory Oct 02 11:16:27 crc kubenswrapper[4783]: W1002 11:16:27.624623 4783 watcher.go:93] Error while processing event ("/sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd39c74b0_272d_4a58_b7ba_f5e12a7d8b1a.slice/crio-50f139631a5106fb29104f45876fa168421bf137b794991ad6b9de8905ddaf03.scope": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch /sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd39c74b0_272d_4a58_b7ba_f5e12a7d8b1a.slice/crio-50f139631a5106fb29104f45876fa168421bf137b794991ad6b9de8905ddaf03.scope: no such file or directory Oct 02 11:16:27 crc kubenswrapper[4783]: W1002 11:16:27.624642 4783 watcher.go:93] Error while processing event ("/sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod09e5ac64_2f67_4af3_bf5f_9628fead6591.slice/crio-conmon-5a665f5d67500ee7742615b9f967a62e6b1ca0229674a6da9ba38dafd6c7687e.scope": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch /sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod09e5ac64_2f67_4af3_bf5f_9628fead6591.slice/crio-conmon-5a665f5d67500ee7742615b9f967a62e6b1ca0229674a6da9ba38dafd6c7687e.scope: no such file or directory Oct 02 11:16:27 crc kubenswrapper[4783]: W1002 11:16:27.624662 4783 watcher.go:93] Error while processing event ("/sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod09e5ac64_2f67_4af3_bf5f_9628fead6591.slice/crio-5a665f5d67500ee7742615b9f967a62e6b1ca0229674a6da9ba38dafd6c7687e.scope": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch /sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod09e5ac64_2f67_4af3_bf5f_9628fead6591.slice/crio-5a665f5d67500ee7742615b9f967a62e6b1ca0229674a6da9ba38dafd6c7687e.scope: no such file or directory Oct 02 11:16:27 crc kubenswrapper[4783]: W1002 11:16:27.687691 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd39c74b0_272d_4a58_b7ba_f5e12a7d8b1a.slice/crio-8b3830b590ceb549cf441695774dd8b5fc7c70930ea6c841fc4823221fc745c4.scope WatchSource:0}: Error finding container 8b3830b590ceb549cf441695774dd8b5fc7c70930ea6c841fc4823221fc745c4: Status 404 returned error can't find the container with id 8b3830b590ceb549cf441695774dd8b5fc7c70930ea6c841fc4823221fc745c4 Oct 02 11:16:27 crc kubenswrapper[4783]: I1002 
11:16:27.788641 4783 generic.go:334] "Generic (PLEG): container finished" podID="09e5ac64-2f67-4af3-bf5f-9628fead6591" containerID="5a665f5d67500ee7742615b9f967a62e6b1ca0229674a6da9ba38dafd6c7687e" exitCode=137 Oct 02 11:16:27 crc kubenswrapper[4783]: I1002 11:16:27.788835 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"09e5ac64-2f67-4af3-bf5f-9628fead6591","Type":"ContainerDied","Data":"5a665f5d67500ee7742615b9f967a62e6b1ca0229674a6da9ba38dafd6c7687e"} Oct 02 11:16:27 crc kubenswrapper[4783]: E1002 11:16:27.970718 4783 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6f6561e9_9bc7_4cde_adda_c142e6f6eb7a.slice/crio-conmon-8589ca3751f97966b896bc74baed6713e4a70d9e4ad200e250db9c7cf485015a.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4fc8f318_4f05_4ada_be59_c258785a63bb.slice/crio-db4c5a2fec82b44ac4c3de44a768351e75e0ce690dfd16900181730a4ea65252\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6f6561e9_9bc7_4cde_adda_c142e6f6eb7a.slice/crio-8589ca3751f97966b896bc74baed6713e4a70d9e4ad200e250db9c7cf485015a.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4fc8f318_4f05_4ada_be59_c258785a63bb.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9d4e10e2_7f87_4ffc_9120_fa41d978cb4f.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb867ffde_a8a3_493e_b07e_f9c8320417ba.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod149e251f_f012_4120_ac20_bbd76bf0fcd3.slice/crio-2e02aba1069f0d41138fe2d1b33978eebda85e3776bf4211569fb60b061ac0d2.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd39c74b0_272d_4a58_b7ba_f5e12a7d8b1a.slice/crio-9d09ded9de9dccbd3674ce785e5846361f2dd0649c7a423ca883da4f72059af1\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod149e251f_f012_4120_ac20_bbd76bf0fcd3.slice/crio-3cbbc3bc4598e0085effb3513ed5ba3df96286607dc628b8674e2c535c79745d\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb867ffde_a8a3_493e_b07e_f9c8320417ba.slice/crio-e82e9e7db4e91bfaeb0b5cbfafdbe9d0a450f7f11bb50d0b31a39e9ad4cad3da.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9d4e10e2_7f87_4ffc_9120_fa41d978cb4f.slice/crio-conmon-5519e8bf7ecbf73cd0360fa9494c1cdc2f27d3941645ec0fe3dd75922d9805d0.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb867ffde_a8a3_493e_b07e_f9c8320417ba.slice/crio-495f2a3bbeb51a2655dad522f20f1f5fa9d89f13a2b7642ad3e5791e05d1cdd4.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8abc2e1e_de94_4880_8a75_0c7ee0a2cdba.slice/crio-conmon-17dc5053f7eb1f95b75a44af44ad2579d939fa2adfc83af523f7bb53acc883e3.scope\": RecentStats: 
unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb867ffde_a8a3_493e_b07e_f9c8320417ba.slice/crio-conmon-e82e9e7db4e91bfaeb0b5cbfafdbe9d0a450f7f11bb50d0b31a39e9ad4cad3da.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb867ffde_a8a3_493e_b07e_f9c8320417ba.slice/crio-366c70a0dda543d8e6d5cf517d0bdd49034748aac82c143f6b934ac6d27c9927\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6f6561e9_9bc7_4cde_adda_c142e6f6eb7a.slice/crio-42dbf0d885c2f45bc7c336d620f72044895639ebf18513403d8d9842f3988e6b.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4fc8f318_4f05_4ada_be59_c258785a63bb.slice/crio-conmon-f32bade4f25b8974536e9df49cf73283ce470904c5c6166ae585beb911790d62.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb867ffde_a8a3_493e_b07e_f9c8320417ba.slice/crio-conmon-495f2a3bbeb51a2655dad522f20f1f5fa9d89f13a2b7642ad3e5791e05d1cdd4.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9d4e10e2_7f87_4ffc_9120_fa41d978cb4f.slice/crio-5519e8bf7ecbf73cd0360fa9494c1cdc2f27d3941645ec0fe3dd75922d9805d0.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6f6561e9_9bc7_4cde_adda_c142e6f6eb7a.slice/crio-conmon-42dbf0d885c2f45bc7c336d620f72044895639ebf18513403d8d9842f3988e6b.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8abc2e1e_de94_4880_8a75_0c7ee0a2cdba.slice/crio-17dc5053f7eb1f95b75a44af44ad2579d939fa2adfc83af523f7bb53acc883e3.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod149e251f_f012_4120_ac20_bbd76bf0fcd3.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd39c74b0_272d_4a58_b7ba_f5e12a7d8b1a.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb867ffde_a8a3_493e_b07e_f9c8320417ba.slice/crio-conmon-25e3634a2d23ed6c0e9b74fba5128b629f194bea54ffe3f4d1078234e4aff9eb.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod149e251f_f012_4120_ac20_bbd76bf0fcd3.slice/crio-conmon-2e02aba1069f0d41138fe2d1b33978eebda85e3776bf4211569fb60b061ac0d2.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb867ffde_a8a3_493e_b07e_f9c8320417ba.slice/crio-25e3634a2d23ed6c0e9b74fba5128b629f194bea54ffe3f4d1078234e4aff9eb.scope\": RecentStats: unable to find data in memory cache]" Oct 02 11:16:28 crc kubenswrapper[4783]: I1002 11:16:28.067430 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Oct 02 11:16:28 crc kubenswrapper[4783]: W1002 11:16:28.067446 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod31b6e57e_e409_4847_9fd6_86778b935975.slice/crio-3b7c4d712ace7dbf98d1b8573b6008feb1e3ccbeb41c13100807a25f5464c903 WatchSource:0}: Error finding 
container 3b7c4d712ace7dbf98d1b8573b6008feb1e3ccbeb41c13100807a25f5464c903: Status 404 returned error can't find the container with id 3b7c4d712ace7dbf98d1b8573b6008feb1e3ccbeb41c13100807a25f5464c903 Oct 02 11:16:28 crc kubenswrapper[4783]: I1002 11:16:28.275988 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 02 11:16:28 crc kubenswrapper[4783]: I1002 11:16:28.321611 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Oct 02 11:16:28 crc kubenswrapper[4783]: I1002 11:16:28.371452 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/09e5ac64-2f67-4af3-bf5f-9628fead6591-etc-machine-id\") pod \"09e5ac64-2f67-4af3-bf5f-9628fead6591\" (UID: \"09e5ac64-2f67-4af3-bf5f-9628fead6591\") " Oct 02 11:16:28 crc kubenswrapper[4783]: I1002 11:16:28.371672 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/09e5ac64-2f67-4af3-bf5f-9628fead6591-scripts\") pod \"09e5ac64-2f67-4af3-bf5f-9628fead6591\" (UID: \"09e5ac64-2f67-4af3-bf5f-9628fead6591\") " Oct 02 11:16:28 crc kubenswrapper[4783]: I1002 11:16:28.371773 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t2bc6\" (UniqueName: \"kubernetes.io/projected/09e5ac64-2f67-4af3-bf5f-9628fead6591-kube-api-access-t2bc6\") pod \"09e5ac64-2f67-4af3-bf5f-9628fead6591\" (UID: \"09e5ac64-2f67-4af3-bf5f-9628fead6591\") " Oct 02 11:16:28 crc kubenswrapper[4783]: I1002 11:16:28.371827 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/09e5ac64-2f67-4af3-bf5f-9628fead6591-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "09e5ac64-2f67-4af3-bf5f-9628fead6591" (UID: "09e5ac64-2f67-4af3-bf5f-9628fead6591"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 02 11:16:28 crc kubenswrapper[4783]: I1002 11:16:28.371884 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09e5ac64-2f67-4af3-bf5f-9628fead6591-combined-ca-bundle\") pod \"09e5ac64-2f67-4af3-bf5f-9628fead6591\" (UID: \"09e5ac64-2f67-4af3-bf5f-9628fead6591\") " Oct 02 11:16:28 crc kubenswrapper[4783]: I1002 11:16:28.372172 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/09e5ac64-2f67-4af3-bf5f-9628fead6591-logs\") pod \"09e5ac64-2f67-4af3-bf5f-9628fead6591\" (UID: \"09e5ac64-2f67-4af3-bf5f-9628fead6591\") " Oct 02 11:16:28 crc kubenswrapper[4783]: I1002 11:16:28.372255 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/09e5ac64-2f67-4af3-bf5f-9628fead6591-config-data-custom\") pod \"09e5ac64-2f67-4af3-bf5f-9628fead6591\" (UID: \"09e5ac64-2f67-4af3-bf5f-9628fead6591\") " Oct 02 11:16:28 crc kubenswrapper[4783]: I1002 11:16:28.372566 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09e5ac64-2f67-4af3-bf5f-9628fead6591-config-data\") pod \"09e5ac64-2f67-4af3-bf5f-9628fead6591\" (UID: \"09e5ac64-2f67-4af3-bf5f-9628fead6591\") " Oct 02 11:16:28 crc kubenswrapper[4783]: I1002 11:16:28.373287 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/09e5ac64-2f67-4af3-bf5f-9628fead6591-logs" (OuterVolumeSpecName: "logs") pod "09e5ac64-2f67-4af3-bf5f-9628fead6591" (UID: "09e5ac64-2f67-4af3-bf5f-9628fead6591"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:16:28 crc kubenswrapper[4783]: I1002 11:16:28.375370 4783 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/09e5ac64-2f67-4af3-bf5f-9628fead6591-logs\") on node \"crc\" DevicePath \"\"" Oct 02 11:16:28 crc kubenswrapper[4783]: I1002 11:16:28.375451 4783 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/09e5ac64-2f67-4af3-bf5f-9628fead6591-etc-machine-id\") on node \"crc\" DevicePath \"\"" Oct 02 11:16:28 crc kubenswrapper[4783]: I1002 11:16:28.398623 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09e5ac64-2f67-4af3-bf5f-9628fead6591-kube-api-access-t2bc6" (OuterVolumeSpecName: "kube-api-access-t2bc6") pod "09e5ac64-2f67-4af3-bf5f-9628fead6591" (UID: "09e5ac64-2f67-4af3-bf5f-9628fead6591"). InnerVolumeSpecName "kube-api-access-t2bc6". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:16:28 crc kubenswrapper[4783]: I1002 11:16:28.403704 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09e5ac64-2f67-4af3-bf5f-9628fead6591-scripts" (OuterVolumeSpecName: "scripts") pod "09e5ac64-2f67-4af3-bf5f-9628fead6591" (UID: "09e5ac64-2f67-4af3-bf5f-9628fead6591"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:16:28 crc kubenswrapper[4783]: I1002 11:16:28.417552 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09e5ac64-2f67-4af3-bf5f-9628fead6591-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "09e5ac64-2f67-4af3-bf5f-9628fead6591" (UID: "09e5ac64-2f67-4af3-bf5f-9628fead6591"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:16:28 crc kubenswrapper[4783]: I1002 11:16:28.435987 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09e5ac64-2f67-4af3-bf5f-9628fead6591-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "09e5ac64-2f67-4af3-bf5f-9628fead6591" (UID: "09e5ac64-2f67-4af3-bf5f-9628fead6591"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:16:28 crc kubenswrapper[4783]: I1002 11:16:28.477060 4783 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/09e5ac64-2f67-4af3-bf5f-9628fead6591-scripts\") on node \"crc\" DevicePath \"\"" Oct 02 11:16:28 crc kubenswrapper[4783]: I1002 11:16:28.477092 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t2bc6\" (UniqueName: \"kubernetes.io/projected/09e5ac64-2f67-4af3-bf5f-9628fead6591-kube-api-access-t2bc6\") on node \"crc\" DevicePath \"\"" Oct 02 11:16:28 crc kubenswrapper[4783]: I1002 11:16:28.477102 4783 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/09e5ac64-2f67-4af3-bf5f-9628fead6591-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 11:16:28 crc kubenswrapper[4783]: I1002 11:16:28.477111 4783 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/09e5ac64-2f67-4af3-bf5f-9628fead6591-config-data-custom\") on node \"crc\" DevicePath \"\"" Oct 02 11:16:28 crc kubenswrapper[4783]: I1002 11:16:28.487828 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09e5ac64-2f67-4af3-bf5f-9628fead6591-config-data" (OuterVolumeSpecName: "config-data") pod "09e5ac64-2f67-4af3-bf5f-9628fead6591" (UID: "09e5ac64-2f67-4af3-bf5f-9628fead6591"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:16:28 crc kubenswrapper[4783]: I1002 11:16:28.578892 4783 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/09e5ac64-2f67-4af3-bf5f-9628fead6591-config-data\") on node \"crc\" DevicePath \"\"" Oct 02 11:16:28 crc kubenswrapper[4783]: I1002 11:16:28.802639 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"09e5ac64-2f67-4af3-bf5f-9628fead6591","Type":"ContainerDied","Data":"67194c0c81fa2bcdf9986a477d7941da3fb21a340622b7f224f42fcffac27d1b"} Oct 02 11:16:28 crc kubenswrapper[4783]: I1002 11:16:28.803003 4783 scope.go:117] "RemoveContainer" containerID="5a665f5d67500ee7742615b9f967a62e6b1ca0229674a6da9ba38dafd6c7687e" Oct 02 11:16:28 crc kubenswrapper[4783]: I1002 11:16:28.802806 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0"
Oct 02 11:16:28 crc kubenswrapper[4783]: I1002 11:16:28.804301 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bd124073-9953-4fc0-80a0-83df1cf2bb45","Type":"ContainerStarted","Data":"aefd69f3210a9edafda256bd390a7d6d3fdfea14da20a9681a840f1bd0870f88"}
Oct 02 11:16:28 crc kubenswrapper[4783]: I1002 11:16:28.806615 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"31b6e57e-e409-4847-9fd6-86778b935975","Type":"ContainerStarted","Data":"3b7c4d712ace7dbf98d1b8573b6008feb1e3ccbeb41c13100807a25f5464c903"}
Oct 02 11:16:28 crc kubenswrapper[4783]: I1002 11:16:28.874108 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"]
Oct 02 11:16:28 crc kubenswrapper[4783]: I1002 11:16:28.875112 4783 scope.go:117] "RemoveContainer" containerID="25187a18e95ed30b9822fb265b8eb95a53fac4497c73ab6aaebcc75064f8846e"
Oct 02 11:16:28 crc kubenswrapper[4783]: I1002 11:16:28.892080 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"]
Oct 02 11:16:28 crc kubenswrapper[4783]: I1002 11:16:28.907526 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"]
Oct 02 11:16:28 crc kubenswrapper[4783]: E1002 11:16:28.908084 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09e5ac64-2f67-4af3-bf5f-9628fead6591" containerName="cinder-api-log"
Oct 02 11:16:28 crc kubenswrapper[4783]: I1002 11:16:28.908178 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="09e5ac64-2f67-4af3-bf5f-9628fead6591" containerName="cinder-api-log"
Oct 02 11:16:28 crc kubenswrapper[4783]: E1002 11:16:28.908271 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="09e5ac64-2f67-4af3-bf5f-9628fead6591" containerName="cinder-api"
Oct 02 11:16:28 crc kubenswrapper[4783]: I1002 11:16:28.908332 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="09e5ac64-2f67-4af3-bf5f-9628fead6591" containerName="cinder-api"
Oct 02 11:16:28 crc kubenswrapper[4783]: I1002 11:16:28.908582 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="09e5ac64-2f67-4af3-bf5f-9628fead6591" containerName="cinder-api"
Oct 02 11:16:28 crc kubenswrapper[4783]: I1002 11:16:28.908845 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="09e5ac64-2f67-4af3-bf5f-9628fead6591" containerName="cinder-api-log"
Oct 02 11:16:28 crc kubenswrapper[4783]: I1002 11:16:28.909859 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Oct 02 11:16:28 crc kubenswrapper[4783]: I1002 11:16:28.915926 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data"
Oct 02 11:16:28 crc kubenswrapper[4783]: I1002 11:16:28.916267 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc"
Oct 02 11:16:28 crc kubenswrapper[4783]: I1002 11:16:28.916495 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc"
Oct 02 11:16:28 crc kubenswrapper[4783]: I1002 11:16:28.955458 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Oct 02 11:16:29 crc kubenswrapper[4783]: I1002 11:16:29.021282 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88339a82-647c-4a4d-a42e-aa70a74c3bd0-config-data\") pod \"cinder-api-0\" (UID: \"88339a82-647c-4a4d-a42e-aa70a74c3bd0\") " pod="openstack/cinder-api-0"
Oct 02 11:16:29 crc kubenswrapper[4783]: I1002 11:16:29.021484 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nq4jv\" (UniqueName: \"kubernetes.io/projected/88339a82-647c-4a4d-a42e-aa70a74c3bd0-kube-api-access-nq4jv\") pod \"cinder-api-0\" (UID: \"88339a82-647c-4a4d-a42e-aa70a74c3bd0\") " pod="openstack/cinder-api-0"
Oct 02 11:16:29 crc kubenswrapper[4783]: I1002 11:16:29.025162 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88339a82-647c-4a4d-a42e-aa70a74c3bd0-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"88339a82-647c-4a4d-a42e-aa70a74c3bd0\") " pod="openstack/cinder-api-0"
Oct 02 11:16:29 crc kubenswrapper[4783]: I1002 11:16:29.025337 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/88339a82-647c-4a4d-a42e-aa70a74c3bd0-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"88339a82-647c-4a4d-a42e-aa70a74c3bd0\") " pod="openstack/cinder-api-0"
Oct 02 11:16:29 crc kubenswrapper[4783]: I1002 11:16:29.025446 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/88339a82-647c-4a4d-a42e-aa70a74c3bd0-config-data-custom\") pod \"cinder-api-0\" (UID: \"88339a82-647c-4a4d-a42e-aa70a74c3bd0\") " pod="openstack/cinder-api-0"
Oct 02 11:16:29 crc kubenswrapper[4783]: I1002 11:16:29.025580 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/88339a82-647c-4a4d-a42e-aa70a74c3bd0-public-tls-certs\") pod \"cinder-api-0\" (UID: \"88339a82-647c-4a4d-a42e-aa70a74c3bd0\") " pod="openstack/cinder-api-0"
Oct 02 11:16:29 crc kubenswrapper[4783]: I1002 11:16:29.025621 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/88339a82-647c-4a4d-a42e-aa70a74c3bd0-logs\") pod \"cinder-api-0\" (UID: \"88339a82-647c-4a4d-a42e-aa70a74c3bd0\") " pod="openstack/cinder-api-0"
Oct 02 11:16:29 crc kubenswrapper[4783]: I1002 11:16:29.025653 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/88339a82-647c-4a4d-a42e-aa70a74c3bd0-scripts\") pod \"cinder-api-0\" (UID: \"88339a82-647c-4a4d-a42e-aa70a74c3bd0\") " pod="openstack/cinder-api-0"
Oct 02 11:16:29 crc kubenswrapper[4783]: I1002 11:16:29.025685 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/88339a82-647c-4a4d-a42e-aa70a74c3bd0-etc-machine-id\") pod \"cinder-api-0\" (UID: \"88339a82-647c-4a4d-a42e-aa70a74c3bd0\") " pod="openstack/cinder-api-0"
Oct 02 11:16:29 crc kubenswrapper[4783]: I1002 11:16:29.127477 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/88339a82-647c-4a4d-a42e-aa70a74c3bd0-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"88339a82-647c-4a4d-a42e-aa70a74c3bd0\") " pod="openstack/cinder-api-0"
Oct 02 11:16:29 crc kubenswrapper[4783]: I1002 11:16:29.127541 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/88339a82-647c-4a4d-a42e-aa70a74c3bd0-config-data-custom\") pod \"cinder-api-0\" (UID: \"88339a82-647c-4a4d-a42e-aa70a74c3bd0\") " pod="openstack/cinder-api-0"
Oct 02 11:16:29 crc kubenswrapper[4783]: I1002 11:16:29.127601 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/88339a82-647c-4a4d-a42e-aa70a74c3bd0-public-tls-certs\") pod \"cinder-api-0\" (UID: \"88339a82-647c-4a4d-a42e-aa70a74c3bd0\") " pod="openstack/cinder-api-0"
Oct 02 11:16:29 crc kubenswrapper[4783]: I1002 11:16:29.127624 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/88339a82-647c-4a4d-a42e-aa70a74c3bd0-logs\") pod \"cinder-api-0\" (UID: \"88339a82-647c-4a4d-a42e-aa70a74c3bd0\") " pod="openstack/cinder-api-0"
Oct 02 11:16:29 crc kubenswrapper[4783]: I1002 11:16:29.127646 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/88339a82-647c-4a4d-a42e-aa70a74c3bd0-scripts\") pod \"cinder-api-0\" (UID: \"88339a82-647c-4a4d-a42e-aa70a74c3bd0\") " pod="openstack/cinder-api-0"
Oct 02 11:16:29 crc kubenswrapper[4783]: I1002 11:16:29.127666 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/88339a82-647c-4a4d-a42e-aa70a74c3bd0-etc-machine-id\") pod \"cinder-api-0\" (UID: \"88339a82-647c-4a4d-a42e-aa70a74c3bd0\") " pod="openstack/cinder-api-0"
Oct 02 11:16:29 crc kubenswrapper[4783]: I1002 11:16:29.127703 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88339a82-647c-4a4d-a42e-aa70a74c3bd0-config-data\") pod \"cinder-api-0\" (UID: \"88339a82-647c-4a4d-a42e-aa70a74c3bd0\") " pod="openstack/cinder-api-0"
Oct 02 11:16:29 crc kubenswrapper[4783]: I1002 11:16:29.127746 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nq4jv\" (UniqueName: \"kubernetes.io/projected/88339a82-647c-4a4d-a42e-aa70a74c3bd0-kube-api-access-nq4jv\") pod \"cinder-api-0\" (UID: \"88339a82-647c-4a4d-a42e-aa70a74c3bd0\") " pod="openstack/cinder-api-0"
Oct 02 11:16:29 crc kubenswrapper[4783]: I1002 11:16:29.127795 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88339a82-647c-4a4d-a42e-aa70a74c3bd0-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"88339a82-647c-4a4d-a42e-aa70a74c3bd0\") " pod="openstack/cinder-api-0"
Oct 02 11:16:29 crc kubenswrapper[4783]: I1002 11:16:29.129121 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/88339a82-647c-4a4d-a42e-aa70a74c3bd0-logs\") pod \"cinder-api-0\" (UID: \"88339a82-647c-4a4d-a42e-aa70a74c3bd0\") " pod="openstack/cinder-api-0"
Oct 02 11:16:29 crc kubenswrapper[4783]: I1002 11:16:29.129276 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/88339a82-647c-4a4d-a42e-aa70a74c3bd0-etc-machine-id\") pod \"cinder-api-0\" (UID: \"88339a82-647c-4a4d-a42e-aa70a74c3bd0\") " pod="openstack/cinder-api-0"
Oct 02 11:16:29 crc kubenswrapper[4783]: I1002 11:16:29.139432 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/88339a82-647c-4a4d-a42e-aa70a74c3bd0-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"88339a82-647c-4a4d-a42e-aa70a74c3bd0\") " pod="openstack/cinder-api-0"
Oct 02 11:16:29 crc kubenswrapper[4783]: I1002 11:16:29.140120 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88339a82-647c-4a4d-a42e-aa70a74c3bd0-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"88339a82-647c-4a4d-a42e-aa70a74c3bd0\") " pod="openstack/cinder-api-0"
Oct 02 11:16:29 crc kubenswrapper[4783]: I1002 11:16:29.140980 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/88339a82-647c-4a4d-a42e-aa70a74c3bd0-scripts\") pod \"cinder-api-0\" (UID: \"88339a82-647c-4a4d-a42e-aa70a74c3bd0\") " pod="openstack/cinder-api-0"
Oct 02 11:16:29 crc kubenswrapper[4783]: I1002 11:16:29.142975 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/88339a82-647c-4a4d-a42e-aa70a74c3bd0-config-data-custom\") pod \"cinder-api-0\" (UID: \"88339a82-647c-4a4d-a42e-aa70a74c3bd0\") " pod="openstack/cinder-api-0"
Oct 02 11:16:29 crc kubenswrapper[4783]: I1002 11:16:29.153244 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88339a82-647c-4a4d-a42e-aa70a74c3bd0-config-data\") pod \"cinder-api-0\" (UID: \"88339a82-647c-4a4d-a42e-aa70a74c3bd0\") " pod="openstack/cinder-api-0"
Oct 02 11:16:29 crc kubenswrapper[4783]: I1002 11:16:29.156340 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/88339a82-647c-4a4d-a42e-aa70a74c3bd0-public-tls-certs\") pod \"cinder-api-0\" (UID: \"88339a82-647c-4a4d-a42e-aa70a74c3bd0\") " pod="openstack/cinder-api-0"
Oct 02 11:16:29 crc kubenswrapper[4783]: I1002 11:16:29.156757 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nq4jv\" (UniqueName: \"kubernetes.io/projected/88339a82-647c-4a4d-a42e-aa70a74c3bd0-kube-api-access-nq4jv\") pod \"cinder-api-0\" (UID: \"88339a82-647c-4a4d-a42e-aa70a74c3bd0\") " pod="openstack/cinder-api-0"
Oct 02 11:16:29 crc kubenswrapper[4783]: I1002 11:16:29.336840 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0"
Oct 02 11:16:29 crc kubenswrapper[4783]: I1002 11:16:29.571140 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09e5ac64-2f67-4af3-bf5f-9628fead6591" path="/var/lib/kubelet/pods/09e5ac64-2f67-4af3-bf5f-9628fead6591/volumes"
Oct 02 11:16:29 crc kubenswrapper[4783]: I1002 11:16:29.654172 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-7c59bccbcc-jsxj5"
Oct 02 11:16:29 crc kubenswrapper[4783]: I1002 11:16:29.744815 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-6f9ccfdf84-lzmhg"]
Oct 02 11:16:29 crc kubenswrapper[4783]: I1002 11:16:29.745069 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-6f9ccfdf84-lzmhg" podUID="1b74946e-4754-418a-a8cd-30512a841704" containerName="neutron-api" containerID="cri-o://861615351fe69ffff68fa1bcc5227cc1fabe6d6fef29df981f56bfd566fb6f87" gracePeriod=30
Oct 02 11:16:29 crc kubenswrapper[4783]: I1002 11:16:29.745549 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-6f9ccfdf84-lzmhg" podUID="1b74946e-4754-418a-a8cd-30512a841704" containerName="neutron-httpd" containerID="cri-o://3cf11c98a87f9759ba0d6875b34c45e1630062d3a871ae3a48da137df6dbd021" gracePeriod=30
Oct 02 11:16:29 crc kubenswrapper[4783]: I1002 11:16:29.831984 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bd124073-9953-4fc0-80a0-83df1cf2bb45","Type":"ContainerStarted","Data":"a31cf18275afc7723dabcf8dcd540f059d43b2b072491473785c47b3dbe7d090"}
Oct 02 11:16:29 crc kubenswrapper[4783]: I1002 11:16:29.833678 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"31b6e57e-e409-4847-9fd6-86778b935975","Type":"ContainerStarted","Data":"d64cd1de19f85b51a17dddb72a3520a6b525a6ad9558f10336b3fd499af11d3f"}
Oct 02 11:16:29 crc kubenswrapper[4783]: I1002 11:16:29.834790 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0"
Oct 02 11:16:29 crc kubenswrapper[4783]: I1002 11:16:29.866728 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=3.295469524 podStartE2EDuration="3.86671041s" podCreationTimestamp="2025-10-02 11:16:26 +0000 UTC" firstStartedPulling="2025-10-02 11:16:28.070713753 +0000 UTC m=+1421.386908014" lastFinishedPulling="2025-10-02 11:16:28.641954639 +0000 UTC m=+1421.958148900" observedRunningTime="2025-10-02 11:16:29.866324489 +0000 UTC m=+1423.182518750" watchObservedRunningTime="2025-10-02 11:16:29.86671041 +0000 UTC m=+1423.182904671"
Oct 02 11:16:29 crc kubenswrapper[4783]: I1002 11:16:29.900962 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"]
Oct 02 11:16:29 crc kubenswrapper[4783]: W1002 11:16:29.915585 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod88339a82_647c_4a4d_a42e_aa70a74c3bd0.slice/crio-495efd724e2a84ce84fae8a8ff96da60e44e6a18db66645695bfeaabb2d74cc0 WatchSource:0}: Error finding container 495efd724e2a84ce84fae8a8ff96da60e44e6a18db66645695bfeaabb2d74cc0: Status 404 returned error can't find the container with id 495efd724e2a84ce84fae8a8ff96da60e44e6a18db66645695bfeaabb2d74cc0
Oct 02 11:16:30 crc kubenswrapper[4783]: I1002 11:16:30.903290 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"88339a82-647c-4a4d-a42e-aa70a74c3bd0","Type":"ContainerStarted","Data":"acdf2d35bfb38008a51c666f2bd55b5de15aac473ecc9bd33a2fa774313293fb"}
Oct 02 11:16:30 crc kubenswrapper[4783]: I1002 11:16:30.903998 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"88339a82-647c-4a4d-a42e-aa70a74c3bd0","Type":"ContainerStarted","Data":"495efd724e2a84ce84fae8a8ff96da60e44e6a18db66645695bfeaabb2d74cc0"}
Oct 02 11:16:30 crc kubenswrapper[4783]: I1002 11:16:30.905765 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bd124073-9953-4fc0-80a0-83df1cf2bb45","Type":"ContainerStarted","Data":"98e27db7e64aba92b66194fc41fca0347134f8c40220c2d1f0b2c5e718e52d81"}
Oct 02 11:16:30 crc kubenswrapper[4783]: I1002 11:16:30.916098 4783 generic.go:334] "Generic (PLEG): container finished" podID="1b74946e-4754-418a-a8cd-30512a841704" containerID="3cf11c98a87f9759ba0d6875b34c45e1630062d3a871ae3a48da137df6dbd021" exitCode=0
Oct 02 11:16:30 crc kubenswrapper[4783]: I1002 11:16:30.916865 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6f9ccfdf84-lzmhg" event={"ID":"1b74946e-4754-418a-a8cd-30512a841704","Type":"ContainerDied","Data":"3cf11c98a87f9759ba0d6875b34c45e1630062d3a871ae3a48da137df6dbd021"}
Oct 02 11:16:31 crc kubenswrapper[4783]: I1002 11:16:31.928883 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"88339a82-647c-4a4d-a42e-aa70a74c3bd0","Type":"ContainerStarted","Data":"0eb8fc5e0f2dbce7ee1d81bd78d0f58eb4ac5b45bd55c56fe8ffe6efbae48f81"}
Oct 02 11:16:31 crc kubenswrapper[4783]: I1002 11:16:31.929271 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0"
Oct 02 11:16:31 crc kubenswrapper[4783]: I1002 11:16:31.933487 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bd124073-9953-4fc0-80a0-83df1cf2bb45","Type":"ContainerStarted","Data":"1885b3dd5d8b68a204f7cf9fb7627eae15b2ffaebe0c845cfef68bcbc73cb5a2"}
Oct 02 11:16:31 crc kubenswrapper[4783]: I1002 11:16:31.955195 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.955175462 podStartE2EDuration="3.955175462s" podCreationTimestamp="2025-10-02 11:16:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:16:31.949819815 +0000 UTC m=+1425.266014076" watchObservedRunningTime="2025-10-02 11:16:31.955175462 +0000 UTC m=+1425.271369723"
Oct 02 11:16:32 crc kubenswrapper[4783]: I1002 11:16:32.982553 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-5fcdf587dd-wvthh" podUID="8abc2e1e-de94-4880-8a75-0c7ee0a2cdba" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.143:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.143:8443: connect: connection refused"
Oct 02 11:16:33 crc kubenswrapper[4783]: I1002 11:16:33.970898 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bd124073-9953-4fc0-80a0-83df1cf2bb45","Type":"ContainerStarted","Data":"7b7fb78c49199a12cdbda16bb272be9e88e424771933f5c99919d181c6ccc6ad"}
Oct 02 11:16:33 crc kubenswrapper[4783]: I1002 11:16:33.971342 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Oct 02 11:16:34 crc kubenswrapper[4783]: I1002 11:16:34.009461 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.208955787 podStartE2EDuration="7.009439477s" podCreationTimestamp="2025-10-02 11:16:27 +0000 UTC" firstStartedPulling="2025-10-02 11:16:28.270597534 +0000 UTC m=+1421.586791795" lastFinishedPulling="2025-10-02 11:16:33.071081224 +0000 UTC m=+1426.387275485" observedRunningTime="2025-10-02 11:16:34.00335711 +0000 UTC m=+1427.319551391" watchObservedRunningTime="2025-10-02 11:16:34.009439477 +0000 UTC m=+1427.325633738"
Oct 02 11:16:35 crc kubenswrapper[4783]: I1002 11:16:35.086496 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Oct 02 11:16:35 crc kubenswrapper[4783]: I1002 11:16:35.087101 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5" containerName="glance-log" containerID="cri-o://e50236fc5cac20cd9b4cff71215510458ff498a2e3572cfcbeea785ab45803f8" gracePeriod=30
Oct 02 11:16:35 crc kubenswrapper[4783]: I1002 11:16:35.087129 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5" containerName="glance-httpd" containerID="cri-o://18ff904c3244a87925e50c6ef8c9e260a9662888608826189c0acc5790554d3e" gracePeriod=30
Oct 02 11:16:35 crc kubenswrapper[4783]: I1002 11:16:35.103675 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-internal-api-0" podUID="591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5" containerName="glance-httpd" probeResult="failure" output="Get \"https://10.217.0.151:9292/healthcheck\": EOF"
Oct 02 11:16:35 crc kubenswrapper[4783]: I1002 11:16:35.103703 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/glance-default-internal-api-0" podUID="591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5" containerName="glance-log" probeResult="failure" output="Get \"https://10.217.0.151:9292/healthcheck\": EOF"
Oct 02 11:16:35 crc kubenswrapper[4783]: I1002 11:16:35.988229 4783 generic.go:334] "Generic (PLEG): container finished" podID="591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5" containerID="e50236fc5cac20cd9b4cff71215510458ff498a2e3572cfcbeea785ab45803f8" exitCode=143
Oct 02 11:16:35 crc kubenswrapper[4783]: I1002 11:16:35.988280 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5","Type":"ContainerDied","Data":"e50236fc5cac20cd9b4cff71215510458ff498a2e3572cfcbeea785ab45803f8"}
Oct 02 11:16:36 crc kubenswrapper[4783]: I1002 11:16:36.296780 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Oct 02 11:16:36 crc kubenswrapper[4783]: I1002 11:16:36.298512 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bd124073-9953-4fc0-80a0-83df1cf2bb45" containerName="ceilometer-central-agent" containerID="cri-o://a31cf18275afc7723dabcf8dcd540f059d43b2b072491473785c47b3dbe7d090" gracePeriod=30
Oct 02 11:16:36 crc kubenswrapper[4783]: I1002 11:16:36.298695 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bd124073-9953-4fc0-80a0-83df1cf2bb45" containerName="proxy-httpd" containerID="cri-o://7b7fb78c49199a12cdbda16bb272be9e88e424771933f5c99919d181c6ccc6ad" gracePeriod=30
Oct 02 11:16:36 crc kubenswrapper[4783]: I1002 11:16:36.298856 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bd124073-9953-4fc0-80a0-83df1cf2bb45" containerName="sg-core" containerID="cri-o://1885b3dd5d8b68a204f7cf9fb7627eae15b2ffaebe0c845cfef68bcbc73cb5a2" gracePeriod=30
Oct 02 11:16:36 crc kubenswrapper[4783]: I1002 11:16:36.298807 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="bd124073-9953-4fc0-80a0-83df1cf2bb45" containerName="ceilometer-notification-agent" containerID="cri-o://98e27db7e64aba92b66194fc41fca0347134f8c40220c2d1f0b2c5e718e52d81" gracePeriod=30
Oct 02 11:16:37 crc kubenswrapper[4783]: I1002 11:16:37.001061 4783 generic.go:334] "Generic (PLEG): container finished" podID="bd124073-9953-4fc0-80a0-83df1cf2bb45" containerID="7b7fb78c49199a12cdbda16bb272be9e88e424771933f5c99919d181c6ccc6ad" exitCode=0
Oct 02 11:16:37 crc kubenswrapper[4783]: I1002 11:16:37.001089 4783 generic.go:334] "Generic (PLEG): container finished" podID="bd124073-9953-4fc0-80a0-83df1cf2bb45" containerID="1885b3dd5d8b68a204f7cf9fb7627eae15b2ffaebe0c845cfef68bcbc73cb5a2" exitCode=2
Oct 02 11:16:37 crc kubenswrapper[4783]: I1002 11:16:37.001096 4783 generic.go:334] "Generic (PLEG): container finished" podID="bd124073-9953-4fc0-80a0-83df1cf2bb45" containerID="98e27db7e64aba92b66194fc41fca0347134f8c40220c2d1f0b2c5e718e52d81" exitCode=0
Oct 02 11:16:37 crc kubenswrapper[4783]: I1002 11:16:37.001123 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bd124073-9953-4fc0-80a0-83df1cf2bb45","Type":"ContainerDied","Data":"7b7fb78c49199a12cdbda16bb272be9e88e424771933f5c99919d181c6ccc6ad"}
Oct 02 11:16:37 crc kubenswrapper[4783]: I1002 11:16:37.001147 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bd124073-9953-4fc0-80a0-83df1cf2bb45","Type":"ContainerDied","Data":"1885b3dd5d8b68a204f7cf9fb7627eae15b2ffaebe0c845cfef68bcbc73cb5a2"}
Oct 02 11:16:37 crc kubenswrapper[4783]: I1002 11:16:37.001157 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bd124073-9953-4fc0-80a0-83df1cf2bb45","Type":"ContainerDied","Data":"98e27db7e64aba92b66194fc41fca0347134f8c40220c2d1f0b2c5e718e52d81"}
Oct 02 11:16:37 crc kubenswrapper[4783]: I1002 11:16:37.340086 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0"
Oct 02 11:16:39 crc kubenswrapper[4783]: I1002 11:16:39.018808 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"14cd84ac-799d-4243-ba1c-3d4ff4e110cc","Type":"ContainerStarted","Data":"dbd566e74dfe44b17a6f9709d3b86445dca9d157ca892ba0a577feae0ea1dcaa"}
Oct 02 11:16:39 crc kubenswrapper[4783]: I1002 11:16:39.040572 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=1.815768853 podStartE2EDuration="32.040549229s" podCreationTimestamp="2025-10-02 11:16:07 +0000 UTC" firstStartedPulling="2025-10-02 11:16:08.108545666 +0000 UTC m=+1401.424739927" lastFinishedPulling="2025-10-02 11:16:38.333326032 +0000 UTC m=+1431.649520303" observedRunningTime="2025-10-02 11:16:39.038280797 +0000 UTC m=+1432.354475068" watchObservedRunningTime="2025-10-02 11:16:39.040549229 +0000 UTC m=+1432.356743490"
Oct 02 11:16:41 crc kubenswrapper[4783]: I1002 11:16:41.290689 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-gk7v9"]
Oct 02 11:16:41 crc kubenswrapper[4783]: I1002 11:16:41.292880 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gk7v9"
Oct 02 11:16:41 crc kubenswrapper[4783]: I1002 11:16:41.316044 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gk7v9"]
Oct 02 11:16:41 crc kubenswrapper[4783]: I1002 11:16:41.395281 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f16a661f-1590-4925-af88-6bc8deb8e227-utilities\") pod \"redhat-operators-gk7v9\" (UID: \"f16a661f-1590-4925-af88-6bc8deb8e227\") " pod="openshift-marketplace/redhat-operators-gk7v9"
Oct 02 11:16:41 crc kubenswrapper[4783]: I1002 11:16:41.396345 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f16a661f-1590-4925-af88-6bc8deb8e227-catalog-content\") pod \"redhat-operators-gk7v9\" (UID: \"f16a661f-1590-4925-af88-6bc8deb8e227\") " pod="openshift-marketplace/redhat-operators-gk7v9"
Oct 02 11:16:41 crc kubenswrapper[4783]: I1002 11:16:41.396373 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pvprb\" (UniqueName: \"kubernetes.io/projected/f16a661f-1590-4925-af88-6bc8deb8e227-kube-api-access-pvprb\") pod \"redhat-operators-gk7v9\" (UID: \"f16a661f-1590-4925-af88-6bc8deb8e227\") " pod="openshift-marketplace/redhat-operators-gk7v9"
Oct 02 11:16:41 crc kubenswrapper[4783]: I1002 11:16:41.497776 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f16a661f-1590-4925-af88-6bc8deb8e227-utilities\") pod \"redhat-operators-gk7v9\" (UID: \"f16a661f-1590-4925-af88-6bc8deb8e227\") " pod="openshift-marketplace/redhat-operators-gk7v9"
Oct 02 11:16:41 crc kubenswrapper[4783]: I1002 11:16:41.498069 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f16a661f-1590-4925-af88-6bc8deb8e227-catalog-content\") pod \"redhat-operators-gk7v9\" (UID: \"f16a661f-1590-4925-af88-6bc8deb8e227\") " pod="openshift-marketplace/redhat-operators-gk7v9"
Oct 02 11:16:41 crc kubenswrapper[4783]: I1002 11:16:41.498170 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pvprb\" (UniqueName: \"kubernetes.io/projected/f16a661f-1590-4925-af88-6bc8deb8e227-kube-api-access-pvprb\") pod \"redhat-operators-gk7v9\" (UID: \"f16a661f-1590-4925-af88-6bc8deb8e227\") " pod="openshift-marketplace/redhat-operators-gk7v9"
Oct 02 11:16:41 crc kubenswrapper[4783]: I1002 11:16:41.498396 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f16a661f-1590-4925-af88-6bc8deb8e227-utilities\") pod \"redhat-operators-gk7v9\" (UID: \"f16a661f-1590-4925-af88-6bc8deb8e227\") " pod="openshift-marketplace/redhat-operators-gk7v9"
Oct 02 11:16:41 crc kubenswrapper[4783]: I1002 11:16:41.498748 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f16a661f-1590-4925-af88-6bc8deb8e227-catalog-content\") pod \"redhat-operators-gk7v9\" (UID: \"f16a661f-1590-4925-af88-6bc8deb8e227\") " pod="openshift-marketplace/redhat-operators-gk7v9"
Oct 02 11:16:41 crc kubenswrapper[4783]: I1002 11:16:41.516782 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pvprb\" (UniqueName: \"kubernetes.io/projected/f16a661f-1590-4925-af88-6bc8deb8e227-kube-api-access-pvprb\") pod \"redhat-operators-gk7v9\" (UID: \"f16a661f-1590-4925-af88-6bc8deb8e227\") " pod="openshift-marketplace/redhat-operators-gk7v9"
Oct 02 11:16:41 crc kubenswrapper[4783]: I1002 11:16:41.612902 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gk7v9"
Oct 02 11:16:42 crc kubenswrapper[4783]: I1002 11:16:42.049051 4783 generic.go:334] "Generic (PLEG): container finished" podID="1b74946e-4754-418a-a8cd-30512a841704" containerID="861615351fe69ffff68fa1bcc5227cc1fabe6d6fef29df981f56bfd566fb6f87" exitCode=0
Oct 02 11:16:42 crc kubenswrapper[4783]: I1002 11:16:42.049375 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6f9ccfdf84-lzmhg" event={"ID":"1b74946e-4754-418a-a8cd-30512a841704","Type":"ContainerDied","Data":"861615351fe69ffff68fa1bcc5227cc1fabe6d6fef29df981f56bfd566fb6f87"}
Oct 02 11:16:42 crc kubenswrapper[4783]: I1002 11:16:42.123575 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gk7v9"]
Oct 02 11:16:42 crc kubenswrapper[4783]: I1002 11:16:42.209935 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-6f9ccfdf84-lzmhg"
Oct 02 11:16:42 crc kubenswrapper[4783]: I1002 11:16:42.325698 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b74946e-4754-418a-a8cd-30512a841704-combined-ca-bundle\") pod \"1b74946e-4754-418a-a8cd-30512a841704\" (UID: \"1b74946e-4754-418a-a8cd-30512a841704\") "
Oct 02 11:16:42 crc kubenswrapper[4783]: I1002 11:16:42.325755 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/1b74946e-4754-418a-a8cd-30512a841704-httpd-config\") pod \"1b74946e-4754-418a-a8cd-30512a841704\" (UID: \"1b74946e-4754-418a-a8cd-30512a841704\") "
Oct 02 11:16:42 crc kubenswrapper[4783]: I1002 11:16:42.325833 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/1b74946e-4754-418a-a8cd-30512a841704-config\") pod \"1b74946e-4754-418a-a8cd-30512a841704\" (UID: \"1b74946e-4754-418a-a8cd-30512a841704\") "
Oct 02 11:16:42 crc kubenswrapper[4783]: I1002 11:16:42.325996 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b74946e-4754-418a-a8cd-30512a841704-ovndb-tls-certs\") pod \"1b74946e-4754-418a-a8cd-30512a841704\" (UID: \"1b74946e-4754-418a-a8cd-30512a841704\") "
Oct 02 11:16:42 crc kubenswrapper[4783]: I1002 11:16:42.326109 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mjtl8\" (UniqueName: \"kubernetes.io/projected/1b74946e-4754-418a-a8cd-30512a841704-kube-api-access-mjtl8\") pod \"1b74946e-4754-418a-a8cd-30512a841704\" (UID: \"1b74946e-4754-418a-a8cd-30512a841704\") "
Oct 02 11:16:42 crc kubenswrapper[4783]: I1002 11:16:42.333089 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b74946e-4754-418a-a8cd-30512a841704-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "1b74946e-4754-418a-a8cd-30512a841704" (UID: "1b74946e-4754-418a-a8cd-30512a841704"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:16:42 crc kubenswrapper[4783]: I1002 11:16:42.333113 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1b74946e-4754-418a-a8cd-30512a841704-kube-api-access-mjtl8" (OuterVolumeSpecName: "kube-api-access-mjtl8") pod "1b74946e-4754-418a-a8cd-30512a841704" (UID: "1b74946e-4754-418a-a8cd-30512a841704"). InnerVolumeSpecName "kube-api-access-mjtl8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 02 11:16:42 crc kubenswrapper[4783]: I1002 11:16:42.384241 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b74946e-4754-418a-a8cd-30512a841704-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1b74946e-4754-418a-a8cd-30512a841704" (UID: "1b74946e-4754-418a-a8cd-30512a841704"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:16:42 crc kubenswrapper[4783]: I1002 11:16:42.391179 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0"
Oct 02 11:16:42 crc kubenswrapper[4783]: I1002 11:16:42.393811 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b74946e-4754-418a-a8cd-30512a841704-config" (OuterVolumeSpecName: "config") pod "1b74946e-4754-418a-a8cd-30512a841704" (UID: "1b74946e-4754-418a-a8cd-30512a841704"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:16:42 crc kubenswrapper[4783]: I1002 11:16:42.427997 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mjtl8\" (UniqueName: \"kubernetes.io/projected/1b74946e-4754-418a-a8cd-30512a841704-kube-api-access-mjtl8\") on node \"crc\" DevicePath \"\""
Oct 02 11:16:42 crc kubenswrapper[4783]: I1002 11:16:42.428294 4783 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1b74946e-4754-418a-a8cd-30512a841704-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Oct 02 11:16:42 crc kubenswrapper[4783]: I1002 11:16:42.428396 4783 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/1b74946e-4754-418a-a8cd-30512a841704-httpd-config\") on node \"crc\" DevicePath \"\""
Oct 02 11:16:42 crc kubenswrapper[4783]: I1002 11:16:42.428511 4783 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/1b74946e-4754-418a-a8cd-30512a841704-config\") on node \"crc\" DevicePath \"\""
Oct 02 11:16:42 crc kubenswrapper[4783]: I1002 11:16:42.444777 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b74946e-4754-418a-a8cd-30512a841704-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "1b74946e-4754-418a-a8cd-30512a841704" (UID: "1b74946e-4754-418a-a8cd-30512a841704"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:16:42 crc kubenswrapper[4783]: I1002 11:16:42.530816 4783 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/1b74946e-4754-418a-a8cd-30512a841704-ovndb-tls-certs\") on node \"crc\" DevicePath \"\""
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.097272 4783 generic.go:334] "Generic (PLEG): container finished" podID="591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5" containerID="18ff904c3244a87925e50c6ef8c9e260a9662888608826189c0acc5790554d3e" exitCode=0
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.097779 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5","Type":"ContainerDied","Data":"18ff904c3244a87925e50c6ef8c9e260a9662888608826189c0acc5790554d3e"}
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.135792 4783 generic.go:334] "Generic (PLEG): container finished" podID="f16a661f-1590-4925-af88-6bc8deb8e227" containerID="5b1c2a6aef6242eecb4daa12b3fdeb492af744841cf5588f0e97a67a366b65e1" exitCode=0
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.136719 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gk7v9" event={"ID":"f16a661f-1590-4925-af88-6bc8deb8e227","Type":"ContainerDied","Data":"5b1c2a6aef6242eecb4daa12b3fdeb492af744841cf5588f0e97a67a366b65e1"}
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.136752 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gk7v9" event={"ID":"f16a661f-1590-4925-af88-6bc8deb8e227","Type":"ContainerStarted","Data":"f50ca8d166a2b863f095a8b72a55939162ba786be1c167c1a05a886372505d61"}
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.140384 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-6f9ccfdf84-lzmhg" event={"ID":"1b74946e-4754-418a-a8cd-30512a841704","Type":"ContainerDied","Data":"fb557630c1aff6034fafe14349765ccb74ab2c00afb100fd5d638a6de582e9ef"}
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.140445 4783 scope.go:117] "RemoveContainer" containerID="3cf11c98a87f9759ba0d6875b34c45e1630062d3a871ae3a48da137df6dbd021"
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.140741 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-6f9ccfdf84-lzmhg"
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.339656 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.360119 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-6f9ccfdf84-lzmhg"]
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.367207 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-6f9ccfdf84-lzmhg"]
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.399747 4783 scope.go:117] "RemoveContainer" containerID="861615351fe69ffff68fa1bcc5227cc1fabe6d6fef29df981f56bfd566fb6f87"
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.462956 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z8f4w\" (UniqueName: \"kubernetes.io/projected/591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5-kube-api-access-z8f4w\") pod \"591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5\" (UID: \"591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5\") "
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.463156 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5-scripts\") pod \"591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5\" (UID: \"591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5\") "
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.463204 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5-httpd-run\") pod \"591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5\" (UID: \"591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5\") "
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.463253 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5-logs\") pod \"591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5\" (UID: \"591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5\") "
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.463299 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5-internal-tls-certs\") pod \"591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5\" (UID: \"591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5\") "
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.463339 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5-config-data\") pod \"591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5\" (UID: \"591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5\") "
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.463431 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5\" (UID: \"591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5\") "
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.463471 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5-combined-ca-bundle\") pod \"591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5\" (UID: \"591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5\") "
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.468027 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5-logs" (OuterVolumeSpecName: "logs") pod "591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5" (UID: "591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.468814 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5" (UID: "591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.491177 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5-kube-api-access-z8f4w" (OuterVolumeSpecName: "kube-api-access-z8f4w") pod "591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5" (UID: "591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5"). InnerVolumeSpecName "kube-api-access-z8f4w". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.501606 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5-scripts" (OuterVolumeSpecName: "scripts") pod "591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5" (UID: "591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.501725 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage09-crc" (OuterVolumeSpecName: "glance") pod "591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5" (UID: "591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5"). InnerVolumeSpecName "local-storage09-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.532621 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5" (UID: "591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.562258 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1b74946e-4754-418a-a8cd-30512a841704" path="/var/lib/kubelet/pods/1b74946e-4754-418a-a8cd-30512a841704/volumes"
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.566622 4783 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5-scripts\") on node \"crc\" DevicePath \"\""
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.566653 4783 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5-httpd-run\") on node \"crc\" DevicePath \"\""
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.566664 4783 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5-logs\") on node \"crc\" DevicePath \"\""
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.566689 4783 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" "
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.566701 4783 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.566715 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z8f4w\" (UniqueName: \"kubernetes.io/projected/591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5-kube-api-access-z8f4w\") on node \"crc\" DevicePath \"\""
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.608835 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5" (UID: "591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.635057 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5-config-data" (OuterVolumeSpecName: "config-data") pod "591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5" (UID: "591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.644131 4783 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage09-crc" (UniqueName: "kubernetes.io/local-volume/local-storage09-crc") on node "crc"
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.668710 4783 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.668750 4783 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5-config-data\") on node \"crc\" DevicePath \"\""
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.668763 4783 reconciler_common.go:293] "Volume detached for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") on node \"crc\" DevicePath \"\""
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.784508 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.871242 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/bd124073-9953-4fc0-80a0-83df1cf2bb45-ceilometer-tls-certs\") pod \"bd124073-9953-4fc0-80a0-83df1cf2bb45\" (UID: \"bd124073-9953-4fc0-80a0-83df1cf2bb45\") "
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.871427 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd124073-9953-4fc0-80a0-83df1cf2bb45-combined-ca-bundle\") pod \"bd124073-9953-4fc0-80a0-83df1cf2bb45\" (UID: \"bd124073-9953-4fc0-80a0-83df1cf2bb45\") "
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.871460 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bd124073-9953-4fc0-80a0-83df1cf2bb45-run-httpd\") pod \"bd124073-9953-4fc0-80a0-83df1cf2bb45\" (UID: \"bd124073-9953-4fc0-80a0-83df1cf2bb45\") "
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.871475 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bd124073-9953-4fc0-80a0-83df1cf2bb45-sg-core-conf-yaml\") pod \"bd124073-9953-4fc0-80a0-83df1cf2bb45\" (UID: \"bd124073-9953-4fc0-80a0-83df1cf2bb45\") "
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.871499 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bd124073-9953-4fc0-80a0-83df1cf2bb45-log-httpd\") pod \"bd124073-9953-4fc0-80a0-83df1cf2bb45\" (UID: \"bd124073-9953-4fc0-80a0-83df1cf2bb45\") "
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.871540 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd124073-9953-4fc0-80a0-83df1cf2bb45-scripts\") pod \"bd124073-9953-4fc0-80a0-83df1cf2bb45\" (UID: \"bd124073-9953-4fc0-80a0-83df1cf2bb45\") "
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.871650 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nr47g\" (UniqueName: \"kubernetes.io/projected/bd124073-9953-4fc0-80a0-83df1cf2bb45-kube-api-access-nr47g\") pod \"bd124073-9953-4fc0-80a0-83df1cf2bb45\" (UID: \"bd124073-9953-4fc0-80a0-83df1cf2bb45\") "
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.871681 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd124073-9953-4fc0-80a0-83df1cf2bb45-config-data\") pod \"bd124073-9953-4fc0-80a0-83df1cf2bb45\" (UID: \"bd124073-9953-4fc0-80a0-83df1cf2bb45\") "
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.874762 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bd124073-9953-4fc0-80a0-83df1cf2bb45-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "bd124073-9953-4fc0-80a0-83df1cf2bb45" (UID: "bd124073-9953-4fc0-80a0-83df1cf2bb45"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.874915 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bd124073-9953-4fc0-80a0-83df1cf2bb45-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "bd124073-9953-4fc0-80a0-83df1cf2bb45" (UID: "bd124073-9953-4fc0-80a0-83df1cf2bb45"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.879634 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd124073-9953-4fc0-80a0-83df1cf2bb45-scripts" (OuterVolumeSpecName: "scripts") pod "bd124073-9953-4fc0-80a0-83df1cf2bb45" (UID: "bd124073-9953-4fc0-80a0-83df1cf2bb45"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.893049 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd124073-9953-4fc0-80a0-83df1cf2bb45-kube-api-access-nr47g" (OuterVolumeSpecName: "kube-api-access-nr47g") pod "bd124073-9953-4fc0-80a0-83df1cf2bb45" (UID: "bd124073-9953-4fc0-80a0-83df1cf2bb45"). InnerVolumeSpecName "kube-api-access-nr47g". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.930524 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd124073-9953-4fc0-80a0-83df1cf2bb45-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "bd124073-9953-4fc0-80a0-83df1cf2bb45" (UID: "bd124073-9953-4fc0-80a0-83df1cf2bb45"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.939620 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd124073-9953-4fc0-80a0-83df1cf2bb45-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "bd124073-9953-4fc0-80a0-83df1cf2bb45" (UID: "bd124073-9953-4fc0-80a0-83df1cf2bb45"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.973200 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nr47g\" (UniqueName: \"kubernetes.io/projected/bd124073-9953-4fc0-80a0-83df1cf2bb45-kube-api-access-nr47g\") on node \"crc\" DevicePath \"\""
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.973236 4783 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/bd124073-9953-4fc0-80a0-83df1cf2bb45-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\""
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.973246 4783 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bd124073-9953-4fc0-80a0-83df1cf2bb45-run-httpd\") on node \"crc\" DevicePath \"\""
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.973254 4783 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/bd124073-9953-4fc0-80a0-83df1cf2bb45-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.973263 4783 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/bd124073-9953-4fc0-80a0-83df1cf2bb45-log-httpd\") on node \"crc\" DevicePath \"\""
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.973270 4783 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd124073-9953-4fc0-80a0-83df1cf2bb45-scripts\") on node \"crc\" DevicePath \"\""
Oct 02 11:16:43 crc kubenswrapper[4783]: I1002 11:16:43.982607 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd124073-9953-4fc0-80a0-83df1cf2bb45-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bd124073-9953-4fc0-80a0-83df1cf2bb45" (UID: "bd124073-9953-4fc0-80a0-83df1cf2bb45"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.008706 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd124073-9953-4fc0-80a0-83df1cf2bb45-config-data" (OuterVolumeSpecName: "config-data") pod "bd124073-9953-4fc0-80a0-83df1cf2bb45" (UID: "bd124073-9953-4fc0-80a0-83df1cf2bb45"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.074483 4783 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd124073-9953-4fc0-80a0-83df1cf2bb45-config-data\") on node \"crc\" DevicePath \"\""
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.074764 4783 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd124073-9953-4fc0-80a0-83df1cf2bb45-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.151933 4783 generic.go:334] "Generic (PLEG): container finished" podID="bd124073-9953-4fc0-80a0-83df1cf2bb45" containerID="a31cf18275afc7723dabcf8dcd540f059d43b2b072491473785c47b3dbe7d090" exitCode=0
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.152006 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.152024 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bd124073-9953-4fc0-80a0-83df1cf2bb45","Type":"ContainerDied","Data":"a31cf18275afc7723dabcf8dcd540f059d43b2b072491473785c47b3dbe7d090"}
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.152080 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"bd124073-9953-4fc0-80a0-83df1cf2bb45","Type":"ContainerDied","Data":"aefd69f3210a9edafda256bd390a7d6d3fdfea14da20a9681a840f1bd0870f88"}
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.152102 4783 scope.go:117] "RemoveContainer" containerID="7b7fb78c49199a12cdbda16bb272be9e88e424771933f5c99919d181c6ccc6ad"
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.170657 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5","Type":"ContainerDied","Data":"7f3ea338794d35487f01da25139791360378f7c07ff57dbacad4d0e0ee911279"}
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.170741 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.196121 4783 scope.go:117] "RemoveContainer" containerID="1885b3dd5d8b68a204f7cf9fb7627eae15b2ffaebe0c845cfef68bcbc73cb5a2"
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.205455 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.230773 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.260321 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.274972 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"]
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.288569 4783 scope.go:117] "RemoveContainer" containerID="98e27db7e64aba92b66194fc41fca0347134f8c40220c2d1f0b2c5e718e52d81"
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.294097 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Oct 02 11:16:44 crc kubenswrapper[4783]: E1002 11:16:44.294503 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd124073-9953-4fc0-80a0-83df1cf2bb45" containerName="ceilometer-central-agent"
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.294521 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd124073-9953-4fc0-80a0-83df1cf2bb45" containerName="ceilometer-central-agent"
Oct 02 11:16:44 crc kubenswrapper[4783]: E1002 11:16:44.294543 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b74946e-4754-418a-a8cd-30512a841704" containerName="neutron-httpd"
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.294550 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b74946e-4754-418a-a8cd-30512a841704" containerName="neutron-httpd"
Oct 02 11:16:44 crc kubenswrapper[4783]: E1002 11:16:44.294560 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5" containerName="glance-httpd"
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.294566 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5" containerName="glance-httpd"
Oct 02 11:16:44 crc kubenswrapper[4783]: E1002 11:16:44.294578 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd124073-9953-4fc0-80a0-83df1cf2bb45" containerName="proxy-httpd"
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.294584 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd124073-9953-4fc0-80a0-83df1cf2bb45" containerName="proxy-httpd"
Oct 02 11:16:44 crc kubenswrapper[4783]: E1002 11:16:44.294596 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd124073-9953-4fc0-80a0-83df1cf2bb45" containerName="sg-core"
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.294603 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd124073-9953-4fc0-80a0-83df1cf2bb45" containerName="sg-core"
Oct 02 11:16:44 crc kubenswrapper[4783]: E1002 11:16:44.294610 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b74946e-4754-418a-a8cd-30512a841704" containerName="neutron-api"
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.294616 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b74946e-4754-418a-a8cd-30512a841704" containerName="neutron-api"
Oct 02 11:16:44 crc kubenswrapper[4783]: E1002 11:16:44.294630 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5" containerName="glance-log"
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.294636 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5" containerName="glance-log"
Oct 02 11:16:44 crc kubenswrapper[4783]: E1002 11:16:44.294665 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd124073-9953-4fc0-80a0-83df1cf2bb45" containerName="ceilometer-notification-agent"
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.294671 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd124073-9953-4fc0-80a0-83df1cf2bb45" containerName="ceilometer-notification-agent"
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.294856 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5" containerName="glance-log"
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.294872 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5" containerName="glance-httpd"
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.294881 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b74946e-4754-418a-a8cd-30512a841704" containerName="neutron-api"
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.294890 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd124073-9953-4fc0-80a0-83df1cf2bb45" containerName="proxy-httpd"
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.294900 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b74946e-4754-418a-a8cd-30512a841704" containerName="neutron-httpd"
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.294908 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd124073-9953-4fc0-80a0-83df1cf2bb45" containerName="ceilometer-notification-agent"
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.294916 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd124073-9953-4fc0-80a0-83df1cf2bb45" containerName="sg-core"
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.294924 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd124073-9953-4fc0-80a0-83df1cf2bb45" containerName="ceilometer-central-agent"
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.297679 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.301627 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.301840 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.301971 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc"
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.315279 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"]
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.316706 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.318666 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data"
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.318819 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc"
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.340275 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.357557 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.375975 4783 scope.go:117] "RemoveContainer" containerID="a31cf18275afc7723dabcf8dcd540f059d43b2b072491473785c47b3dbe7d090"
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.379828 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/43fe06ee-67fa-4e7e-856c-e2fa72d07d65-scripts\") pod \"ceilometer-0\" (UID: \"43fe06ee-67fa-4e7e-856c-e2fa72d07d65\") " pod="openstack/ceilometer-0"
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.379890 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/43fe06ee-67fa-4e7e-856c-e2fa72d07d65-run-httpd\") pod \"ceilometer-0\" (UID: \"43fe06ee-67fa-4e7e-856c-e2fa72d07d65\") " pod="openstack/ceilometer-0"
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.379914 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43fe06ee-67fa-4e7e-856c-e2fa72d07d65-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"43fe06ee-67fa-4e7e-856c-e2fa72d07d65\") " pod="openstack/ceilometer-0"
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.379945 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/43fe06ee-67fa-4e7e-856c-e2fa72d07d65-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"43fe06ee-67fa-4e7e-856c-e2fa72d07d65\") " pod="openstack/ceilometer-0"
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.379974 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/43fe06ee-67fa-4e7e-856c-e2fa72d07d65-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"43fe06ee-67fa-4e7e-856c-e2fa72d07d65\") " pod="openstack/ceilometer-0"
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.380000 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/43fe06ee-67fa-4e7e-856c-e2fa72d07d65-log-httpd\") pod \"ceilometer-0\" (UID: \"43fe06ee-67fa-4e7e-856c-e2fa72d07d65\") " pod="openstack/ceilometer-0"
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.380023 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kmw9m\" (UniqueName: \"kubernetes.io/projected/43fe06ee-67fa-4e7e-856c-e2fa72d07d65-kube-api-access-kmw9m\") pod \"ceilometer-0\" (UID: \"43fe06ee-67fa-4e7e-856c-e2fa72d07d65\") " pod="openstack/ceilometer-0"
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.380075 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/43fe06ee-67fa-4e7e-856c-e2fa72d07d65-config-data\") pod \"ceilometer-0\" (UID: \"43fe06ee-67fa-4e7e-856c-e2fa72d07d65\") " pod="openstack/ceilometer-0"
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.425253 4783 scope.go:117] "RemoveContainer" containerID="7b7fb78c49199a12cdbda16bb272be9e88e424771933f5c99919d181c6ccc6ad"
Oct 02 11:16:44 crc kubenswrapper[4783]: E1002 11:16:44.425669 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7b7fb78c49199a12cdbda16bb272be9e88e424771933f5c99919d181c6ccc6ad\": container with ID starting with 7b7fb78c49199a12cdbda16bb272be9e88e424771933f5c99919d181c6ccc6ad not found: ID does not exist" containerID="7b7fb78c49199a12cdbda16bb272be9e88e424771933f5c99919d181c6ccc6ad"
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.425701 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7b7fb78c49199a12cdbda16bb272be9e88e424771933f5c99919d181c6ccc6ad"} err="failed to get container status \"7b7fb78c49199a12cdbda16bb272be9e88e424771933f5c99919d181c6ccc6ad\": rpc error: code = NotFound desc = could not find container \"7b7fb78c49199a12cdbda16bb272be9e88e424771933f5c99919d181c6ccc6ad\": container with ID starting with 7b7fb78c49199a12cdbda16bb272be9e88e424771933f5c99919d181c6ccc6ad not found: ID does not exist"
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.425721 4783 scope.go:117] "RemoveContainer" containerID="1885b3dd5d8b68a204f7cf9fb7627eae15b2ffaebe0c845cfef68bcbc73cb5a2"
Oct 02 11:16:44 crc kubenswrapper[4783]: E1002 11:16:44.426331 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1885b3dd5d8b68a204f7cf9fb7627eae15b2ffaebe0c845cfef68bcbc73cb5a2\": container with ID starting with 1885b3dd5d8b68a204f7cf9fb7627eae15b2ffaebe0c845cfef68bcbc73cb5a2 not found: ID does not exist" containerID="1885b3dd5d8b68a204f7cf9fb7627eae15b2ffaebe0c845cfef68bcbc73cb5a2"
Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.426352 4783 pod_container_deletor.go:53] "DeleteContainer returned error"
containerID={"Type":"cri-o","ID":"1885b3dd5d8b68a204f7cf9fb7627eae15b2ffaebe0c845cfef68bcbc73cb5a2"} err="failed to get container status \"1885b3dd5d8b68a204f7cf9fb7627eae15b2ffaebe0c845cfef68bcbc73cb5a2\": rpc error: code = NotFound desc = could not find container \"1885b3dd5d8b68a204f7cf9fb7627eae15b2ffaebe0c845cfef68bcbc73cb5a2\": container with ID starting with 1885b3dd5d8b68a204f7cf9fb7627eae15b2ffaebe0c845cfef68bcbc73cb5a2 not found: ID does not exist" Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.426365 4783 scope.go:117] "RemoveContainer" containerID="98e27db7e64aba92b66194fc41fca0347134f8c40220c2d1f0b2c5e718e52d81" Oct 02 11:16:44 crc kubenswrapper[4783]: E1002 11:16:44.426715 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"98e27db7e64aba92b66194fc41fca0347134f8c40220c2d1f0b2c5e718e52d81\": container with ID starting with 98e27db7e64aba92b66194fc41fca0347134f8c40220c2d1f0b2c5e718e52d81 not found: ID does not exist" containerID="98e27db7e64aba92b66194fc41fca0347134f8c40220c2d1f0b2c5e718e52d81" Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.426739 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"98e27db7e64aba92b66194fc41fca0347134f8c40220c2d1f0b2c5e718e52d81"} err="failed to get container status \"98e27db7e64aba92b66194fc41fca0347134f8c40220c2d1f0b2c5e718e52d81\": rpc error: code = NotFound desc = could not find container \"98e27db7e64aba92b66194fc41fca0347134f8c40220c2d1f0b2c5e718e52d81\": container with ID starting with 98e27db7e64aba92b66194fc41fca0347134f8c40220c2d1f0b2c5e718e52d81 not found: ID does not exist" Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.426752 4783 scope.go:117] "RemoveContainer" containerID="a31cf18275afc7723dabcf8dcd540f059d43b2b072491473785c47b3dbe7d090" Oct 02 11:16:44 crc kubenswrapper[4783]: E1002 11:16:44.428403 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a31cf18275afc7723dabcf8dcd540f059d43b2b072491473785c47b3dbe7d090\": container with ID starting with a31cf18275afc7723dabcf8dcd540f059d43b2b072491473785c47b3dbe7d090 not found: ID does not exist" containerID="a31cf18275afc7723dabcf8dcd540f059d43b2b072491473785c47b3dbe7d090" Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.428680 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a31cf18275afc7723dabcf8dcd540f059d43b2b072491473785c47b3dbe7d090"} err="failed to get container status \"a31cf18275afc7723dabcf8dcd540f059d43b2b072491473785c47b3dbe7d090\": rpc error: code = NotFound desc = could not find container \"a31cf18275afc7723dabcf8dcd540f059d43b2b072491473785c47b3dbe7d090\": container with ID starting with a31cf18275afc7723dabcf8dcd540f059d43b2b072491473785c47b3dbe7d090 not found: ID does not exist" Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.428700 4783 scope.go:117] "RemoveContainer" containerID="18ff904c3244a87925e50c6ef8c9e260a9662888608826189c0acc5790554d3e" Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.481395 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d68ff5d3-015e-4fb4-a1e2-617122af5a45-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"d68ff5d3-015e-4fb4-a1e2-617122af5a45\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:16:44 crc 
kubenswrapper[4783]: I1002 11:16:44.481471 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d68ff5d3-015e-4fb4-a1e2-617122af5a45-scripts\") pod \"glance-default-internal-api-0\" (UID: \"d68ff5d3-015e-4fb4-a1e2-617122af5a45\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.481496 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/43fe06ee-67fa-4e7e-856c-e2fa72d07d65-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"43fe06ee-67fa-4e7e-856c-e2fa72d07d65\") " pod="openstack/ceilometer-0" Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.481532 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/43fe06ee-67fa-4e7e-856c-e2fa72d07d65-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"43fe06ee-67fa-4e7e-856c-e2fa72d07d65\") " pod="openstack/ceilometer-0" Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.481615 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/43fe06ee-67fa-4e7e-856c-e2fa72d07d65-log-httpd\") pod \"ceilometer-0\" (UID: \"43fe06ee-67fa-4e7e-856c-e2fa72d07d65\") " pod="openstack/ceilometer-0" Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.481635 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cjtsm\" (UniqueName: \"kubernetes.io/projected/d68ff5d3-015e-4fb4-a1e2-617122af5a45-kube-api-access-cjtsm\") pod \"glance-default-internal-api-0\" (UID: \"d68ff5d3-015e-4fb4-a1e2-617122af5a45\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.481659 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kmw9m\" (UniqueName: \"kubernetes.io/projected/43fe06ee-67fa-4e7e-856c-e2fa72d07d65-kube-api-access-kmw9m\") pod \"ceilometer-0\" (UID: \"43fe06ee-67fa-4e7e-856c-e2fa72d07d65\") " pod="openstack/ceilometer-0" Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.481763 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-internal-api-0\" (UID: \"d68ff5d3-015e-4fb4-a1e2-617122af5a45\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.481851 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/43fe06ee-67fa-4e7e-856c-e2fa72d07d65-config-data\") pod \"ceilometer-0\" (UID: \"43fe06ee-67fa-4e7e-856c-e2fa72d07d65\") " pod="openstack/ceilometer-0" Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.481872 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d68ff5d3-015e-4fb4-a1e2-617122af5a45-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"d68ff5d3-015e-4fb4-a1e2-617122af5a45\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.481932 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"logs\" (UniqueName: \"kubernetes.io/empty-dir/d68ff5d3-015e-4fb4-a1e2-617122af5a45-logs\") pod \"glance-default-internal-api-0\" (UID: \"d68ff5d3-015e-4fb4-a1e2-617122af5a45\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.481973 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d68ff5d3-015e-4fb4-a1e2-617122af5a45-config-data\") pod \"glance-default-internal-api-0\" (UID: \"d68ff5d3-015e-4fb4-a1e2-617122af5a45\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.482004 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/43fe06ee-67fa-4e7e-856c-e2fa72d07d65-log-httpd\") pod \"ceilometer-0\" (UID: \"43fe06ee-67fa-4e7e-856c-e2fa72d07d65\") " pod="openstack/ceilometer-0" Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.482041 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d68ff5d3-015e-4fb4-a1e2-617122af5a45-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"d68ff5d3-015e-4fb4-a1e2-617122af5a45\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.482100 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/43fe06ee-67fa-4e7e-856c-e2fa72d07d65-scripts\") pod \"ceilometer-0\" (UID: \"43fe06ee-67fa-4e7e-856c-e2fa72d07d65\") " pod="openstack/ceilometer-0" Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.482147 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/43fe06ee-67fa-4e7e-856c-e2fa72d07d65-run-httpd\") pod \"ceilometer-0\" (UID: \"43fe06ee-67fa-4e7e-856c-e2fa72d07d65\") " pod="openstack/ceilometer-0" Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.482251 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43fe06ee-67fa-4e7e-856c-e2fa72d07d65-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"43fe06ee-67fa-4e7e-856c-e2fa72d07d65\") " pod="openstack/ceilometer-0" Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.482998 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/43fe06ee-67fa-4e7e-856c-e2fa72d07d65-run-httpd\") pod \"ceilometer-0\" (UID: \"43fe06ee-67fa-4e7e-856c-e2fa72d07d65\") " pod="openstack/ceilometer-0" Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.489933 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/43fe06ee-67fa-4e7e-856c-e2fa72d07d65-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"43fe06ee-67fa-4e7e-856c-e2fa72d07d65\") " pod="openstack/ceilometer-0" Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.489991 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/43fe06ee-67fa-4e7e-856c-e2fa72d07d65-scripts\") pod \"ceilometer-0\" (UID: \"43fe06ee-67fa-4e7e-856c-e2fa72d07d65\") " pod="openstack/ceilometer-0" Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.490704 4783 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43fe06ee-67fa-4e7e-856c-e2fa72d07d65-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"43fe06ee-67fa-4e7e-856c-e2fa72d07d65\") " pod="openstack/ceilometer-0" Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.491227 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/43fe06ee-67fa-4e7e-856c-e2fa72d07d65-config-data\") pod \"ceilometer-0\" (UID: \"43fe06ee-67fa-4e7e-856c-e2fa72d07d65\") " pod="openstack/ceilometer-0" Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.497793 4783 scope.go:117] "RemoveContainer" containerID="e50236fc5cac20cd9b4cff71215510458ff498a2e3572cfcbeea785ab45803f8" Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.497968 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/43fe06ee-67fa-4e7e-856c-e2fa72d07d65-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"43fe06ee-67fa-4e7e-856c-e2fa72d07d65\") " pod="openstack/ceilometer-0" Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.502007 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kmw9m\" (UniqueName: \"kubernetes.io/projected/43fe06ee-67fa-4e7e-856c-e2fa72d07d65-kube-api-access-kmw9m\") pod \"ceilometer-0\" (UID: \"43fe06ee-67fa-4e7e-856c-e2fa72d07d65\") " pod="openstack/ceilometer-0" Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.584504 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d68ff5d3-015e-4fb4-a1e2-617122af5a45-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"d68ff5d3-015e-4fb4-a1e2-617122af5a45\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.584621 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d68ff5d3-015e-4fb4-a1e2-617122af5a45-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"d68ff5d3-015e-4fb4-a1e2-617122af5a45\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.584646 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d68ff5d3-015e-4fb4-a1e2-617122af5a45-scripts\") pod \"glance-default-internal-api-0\" (UID: \"d68ff5d3-015e-4fb4-a1e2-617122af5a45\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.584690 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cjtsm\" (UniqueName: \"kubernetes.io/projected/d68ff5d3-015e-4fb4-a1e2-617122af5a45-kube-api-access-cjtsm\") pod \"glance-default-internal-api-0\" (UID: \"d68ff5d3-015e-4fb4-a1e2-617122af5a45\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.584774 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-internal-api-0\" (UID: \"d68ff5d3-015e-4fb4-a1e2-617122af5a45\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.585116 4783 operation_generator.go:580] "MountVolume.MountDevice succeeded 
for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-internal-api-0\" (UID: \"d68ff5d3-015e-4fb4-a1e2-617122af5a45\") device mount path \"/mnt/openstack/pv09\"" pod="openstack/glance-default-internal-api-0" Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.585117 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d68ff5d3-015e-4fb4-a1e2-617122af5a45-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"d68ff5d3-015e-4fb4-a1e2-617122af5a45\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.585147 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d68ff5d3-015e-4fb4-a1e2-617122af5a45-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"d68ff5d3-015e-4fb4-a1e2-617122af5a45\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.585583 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d68ff5d3-015e-4fb4-a1e2-617122af5a45-logs\") pod \"glance-default-internal-api-0\" (UID: \"d68ff5d3-015e-4fb4-a1e2-617122af5a45\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.585607 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d68ff5d3-015e-4fb4-a1e2-617122af5a45-config-data\") pod \"glance-default-internal-api-0\" (UID: \"d68ff5d3-015e-4fb4-a1e2-617122af5a45\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.586692 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d68ff5d3-015e-4fb4-a1e2-617122af5a45-logs\") pod \"glance-default-internal-api-0\" (UID: \"d68ff5d3-015e-4fb4-a1e2-617122af5a45\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.591969 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d68ff5d3-015e-4fb4-a1e2-617122af5a45-scripts\") pod \"glance-default-internal-api-0\" (UID: \"d68ff5d3-015e-4fb4-a1e2-617122af5a45\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.592084 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/d68ff5d3-015e-4fb4-a1e2-617122af5a45-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"d68ff5d3-015e-4fb4-a1e2-617122af5a45\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.593287 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d68ff5d3-015e-4fb4-a1e2-617122af5a45-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"d68ff5d3-015e-4fb4-a1e2-617122af5a45\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.594326 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d68ff5d3-015e-4fb4-a1e2-617122af5a45-config-data\") pod 
\"glance-default-internal-api-0\" (UID: \"d68ff5d3-015e-4fb4-a1e2-617122af5a45\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.612300 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cjtsm\" (UniqueName: \"kubernetes.io/projected/d68ff5d3-015e-4fb4-a1e2-617122af5a45-kube-api-access-cjtsm\") pod \"glance-default-internal-api-0\" (UID: \"d68ff5d3-015e-4fb4-a1e2-617122af5a45\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.634207 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.640956 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage09-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage09-crc\") pod \"glance-default-internal-api-0\" (UID: \"d68ff5d3-015e-4fb4-a1e2-617122af5a45\") " pod="openstack/glance-default-internal-api-0" Oct 02 11:16:44 crc kubenswrapper[4783]: I1002 11:16:44.660133 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Oct 02 11:16:45 crc kubenswrapper[4783]: I1002 11:16:45.195273 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gk7v9" event={"ID":"f16a661f-1590-4925-af88-6bc8deb8e227","Type":"ContainerStarted","Data":"c00e977c3a1c26702373235673c6e69c4dd95c2b8d7d12a98813e5327e360668"} Oct 02 11:16:45 crc kubenswrapper[4783]: I1002 11:16:45.253789 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 02 11:16:45 crc kubenswrapper[4783]: I1002 11:16:45.440701 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 02 11:16:45 crc kubenswrapper[4783]: I1002 11:16:45.524586 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Oct 02 11:16:45 crc kubenswrapper[4783]: I1002 11:16:45.578839 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5" path="/var/lib/kubelet/pods/591ff3e8-f8cc-4ca4-81a4-5b1cfc19ceb5/volumes" Oct 02 11:16:45 crc kubenswrapper[4783]: I1002 11:16:45.579877 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd124073-9953-4fc0-80a0-83df1cf2bb45" path="/var/lib/kubelet/pods/bd124073-9953-4fc0-80a0-83df1cf2bb45/volumes" Oct 02 11:16:46 crc kubenswrapper[4783]: I1002 11:16:46.221790 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"d68ff5d3-015e-4fb4-a1e2-617122af5a45","Type":"ContainerStarted","Data":"92725da26cb9fd95f879661864ccf59d295cb3f908baecdec783dc14f7f3297e"} Oct 02 11:16:46 crc kubenswrapper[4783]: I1002 11:16:46.224042 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"43fe06ee-67fa-4e7e-856c-e2fa72d07d65","Type":"ContainerStarted","Data":"bc1277c6094efeacda1ed65f271fb3cfa8bae2400b67d0b9a417e75d7856e5f0"} Oct 02 11:16:46 crc kubenswrapper[4783]: I1002 11:16:46.652677 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Oct 02 11:16:46 crc kubenswrapper[4783]: I1002 11:16:46.652963 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="cd779125-a75d-4045-a75c-e7ed1fc66d5a" 
containerName="glance-log" containerID="cri-o://4e6ac63eed2338754a176ea676e3a145a674a9566cbdddb5d20fd0d89861fee5" gracePeriod=30 Oct 02 11:16:46 crc kubenswrapper[4783]: I1002 11:16:46.653458 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="cd779125-a75d-4045-a75c-e7ed1fc66d5a" containerName="glance-httpd" containerID="cri-o://c96f3388bdb9fb30ee9e91bfef63294eff4d35265da91c49c40a99af49e4f40c" gracePeriod=30 Oct 02 11:16:47 crc kubenswrapper[4783]: I1002 11:16:47.247763 4783 generic.go:334] "Generic (PLEG): container finished" podID="cd779125-a75d-4045-a75c-e7ed1fc66d5a" containerID="4e6ac63eed2338754a176ea676e3a145a674a9566cbdddb5d20fd0d89861fee5" exitCode=143 Oct 02 11:16:47 crc kubenswrapper[4783]: I1002 11:16:47.248359 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"cd779125-a75d-4045-a75c-e7ed1fc66d5a","Type":"ContainerDied","Data":"4e6ac63eed2338754a176ea676e3a145a674a9566cbdddb5d20fd0d89861fee5"} Oct 02 11:16:47 crc kubenswrapper[4783]: I1002 11:16:47.252396 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"d68ff5d3-015e-4fb4-a1e2-617122af5a45","Type":"ContainerStarted","Data":"d42b3e3efb76e42119482ce7e9b1424da12b7fd7e5ddf3f244e79e8db19f75f6"} Oct 02 11:16:47 crc kubenswrapper[4783]: I1002 11:16:47.984607 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-5fcdf587dd-wvthh" podUID="8abc2e1e-de94-4880-8a75-0c7ee0a2cdba" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.143:8443/dashboard/auth/login/?next=/dashboard/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 02 11:16:47 crc kubenswrapper[4783]: I1002 11:16:47.984961 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-5fcdf587dd-wvthh" Oct 02 11:16:47 crc kubenswrapper[4783]: I1002 11:16:47.985729 4783 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="horizon" containerStatusID={"Type":"cri-o","ID":"9a79bf128cd748cac66115e7e4373946c6413ab941b85a301fa5e56d361363ac"} pod="openstack/horizon-5fcdf587dd-wvthh" containerMessage="Container horizon failed startup probe, will be restarted" Oct 02 11:16:47 crc kubenswrapper[4783]: I1002 11:16:47.985759 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-5fcdf587dd-wvthh" podUID="8abc2e1e-de94-4880-8a75-0c7ee0a2cdba" containerName="horizon" containerID="cri-o://9a79bf128cd748cac66115e7e4373946c6413ab941b85a301fa5e56d361363ac" gracePeriod=30 Oct 02 11:16:48 crc kubenswrapper[4783]: I1002 11:16:48.286673 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"d68ff5d3-015e-4fb4-a1e2-617122af5a45","Type":"ContainerStarted","Data":"625c0780e36bb2b71202642cbf344403f42492d6955b6498c903d0534aa57652"} Oct 02 11:16:48 crc kubenswrapper[4783]: I1002 11:16:48.332604 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=4.332584763 podStartE2EDuration="4.332584763s" podCreationTimestamp="2025-10-02 11:16:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:16:48.309775208 +0000 UTC m=+1441.625969459" watchObservedRunningTime="2025-10-02 11:16:48.332584763 
+0000 UTC m=+1441.648779024" Oct 02 11:16:49 crc kubenswrapper[4783]: I1002 11:16:49.298845 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"43fe06ee-67fa-4e7e-856c-e2fa72d07d65","Type":"ContainerStarted","Data":"9ef1536e530b59f854722f92a7cc7e14a6032a2cf986c306b80b1c412b2694f7"} Oct 02 11:16:50 crc kubenswrapper[4783]: I1002 11:16:50.311980 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"43fe06ee-67fa-4e7e-856c-e2fa72d07d65","Type":"ContainerStarted","Data":"7bf964470b3f53abdc76b2edea6ec464be7615c3152f21cc48b787820846a092"} Oct 02 11:16:52 crc kubenswrapper[4783]: I1002 11:16:52.334850 4783 generic.go:334] "Generic (PLEG): container finished" podID="cd779125-a75d-4045-a75c-e7ed1fc66d5a" containerID="c96f3388bdb9fb30ee9e91bfef63294eff4d35265da91c49c40a99af49e4f40c" exitCode=0 Oct 02 11:16:52 crc kubenswrapper[4783]: I1002 11:16:52.334935 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"cd779125-a75d-4045-a75c-e7ed1fc66d5a","Type":"ContainerDied","Data":"c96f3388bdb9fb30ee9e91bfef63294eff4d35265da91c49c40a99af49e4f40c"} Oct 02 11:16:52 crc kubenswrapper[4783]: I1002 11:16:52.788304 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Oct 02 11:16:52 crc kubenswrapper[4783]: I1002 11:16:52.868016 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rbpn4\" (UniqueName: \"kubernetes.io/projected/cd779125-a75d-4045-a75c-e7ed1fc66d5a-kube-api-access-rbpn4\") pod \"cd779125-a75d-4045-a75c-e7ed1fc66d5a\" (UID: \"cd779125-a75d-4045-a75c-e7ed1fc66d5a\") " Oct 02 11:16:52 crc kubenswrapper[4783]: I1002 11:16:52.868080 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cd779125-a75d-4045-a75c-e7ed1fc66d5a-scripts\") pod \"cd779125-a75d-4045-a75c-e7ed1fc66d5a\" (UID: \"cd779125-a75d-4045-a75c-e7ed1fc66d5a\") " Oct 02 11:16:52 crc kubenswrapper[4783]: I1002 11:16:52.868144 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cd779125-a75d-4045-a75c-e7ed1fc66d5a-public-tls-certs\") pod \"cd779125-a75d-4045-a75c-e7ed1fc66d5a\" (UID: \"cd779125-a75d-4045-a75c-e7ed1fc66d5a\") " Oct 02 11:16:52 crc kubenswrapper[4783]: I1002 11:16:52.868218 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cd779125-a75d-4045-a75c-e7ed1fc66d5a-logs\") pod \"cd779125-a75d-4045-a75c-e7ed1fc66d5a\" (UID: \"cd779125-a75d-4045-a75c-e7ed1fc66d5a\") " Oct 02 11:16:52 crc kubenswrapper[4783]: I1002 11:16:52.868269 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"cd779125-a75d-4045-a75c-e7ed1fc66d5a\" (UID: \"cd779125-a75d-4045-a75c-e7ed1fc66d5a\") " Oct 02 11:16:52 crc kubenswrapper[4783]: I1002 11:16:52.868298 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/cd779125-a75d-4045-a75c-e7ed1fc66d5a-httpd-run\") pod \"cd779125-a75d-4045-a75c-e7ed1fc66d5a\" (UID: \"cd779125-a75d-4045-a75c-e7ed1fc66d5a\") " Oct 02 11:16:52 crc kubenswrapper[4783]: I1002 11:16:52.868365 4783 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd779125-a75d-4045-a75c-e7ed1fc66d5a-config-data\") pod \"cd779125-a75d-4045-a75c-e7ed1fc66d5a\" (UID: \"cd779125-a75d-4045-a75c-e7ed1fc66d5a\") " Oct 02 11:16:52 crc kubenswrapper[4783]: I1002 11:16:52.868494 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd779125-a75d-4045-a75c-e7ed1fc66d5a-combined-ca-bundle\") pod \"cd779125-a75d-4045-a75c-e7ed1fc66d5a\" (UID: \"cd779125-a75d-4045-a75c-e7ed1fc66d5a\") " Oct 02 11:16:52 crc kubenswrapper[4783]: I1002 11:16:52.869299 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cd779125-a75d-4045-a75c-e7ed1fc66d5a-logs" (OuterVolumeSpecName: "logs") pod "cd779125-a75d-4045-a75c-e7ed1fc66d5a" (UID: "cd779125-a75d-4045-a75c-e7ed1fc66d5a"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:16:52 crc kubenswrapper[4783]: I1002 11:16:52.869550 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cd779125-a75d-4045-a75c-e7ed1fc66d5a-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "cd779125-a75d-4045-a75c-e7ed1fc66d5a" (UID: "cd779125-a75d-4045-a75c-e7ed1fc66d5a"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:16:52 crc kubenswrapper[4783]: I1002 11:16:52.874720 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "glance") pod "cd779125-a75d-4045-a75c-e7ed1fc66d5a" (UID: "cd779125-a75d-4045-a75c-e7ed1fc66d5a"). InnerVolumeSpecName "local-storage02-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Oct 02 11:16:52 crc kubenswrapper[4783]: I1002 11:16:52.877059 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd779125-a75d-4045-a75c-e7ed1fc66d5a-scripts" (OuterVolumeSpecName: "scripts") pod "cd779125-a75d-4045-a75c-e7ed1fc66d5a" (UID: "cd779125-a75d-4045-a75c-e7ed1fc66d5a"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:16:52 crc kubenswrapper[4783]: I1002 11:16:52.880760 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd779125-a75d-4045-a75c-e7ed1fc66d5a-kube-api-access-rbpn4" (OuterVolumeSpecName: "kube-api-access-rbpn4") pod "cd779125-a75d-4045-a75c-e7ed1fc66d5a" (UID: "cd779125-a75d-4045-a75c-e7ed1fc66d5a"). InnerVolumeSpecName "kube-api-access-rbpn4". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:16:52 crc kubenswrapper[4783]: I1002 11:16:52.967666 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd779125-a75d-4045-a75c-e7ed1fc66d5a-config-data" (OuterVolumeSpecName: "config-data") pod "cd779125-a75d-4045-a75c-e7ed1fc66d5a" (UID: "cd779125-a75d-4045-a75c-e7ed1fc66d5a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:16:52 crc kubenswrapper[4783]: I1002 11:16:52.968181 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd779125-a75d-4045-a75c-e7ed1fc66d5a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cd779125-a75d-4045-a75c-e7ed1fc66d5a" (UID: "cd779125-a75d-4045-a75c-e7ed1fc66d5a"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:16:52 crc kubenswrapper[4783]: I1002 11:16:52.973727 4783 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/cd779125-a75d-4045-a75c-e7ed1fc66d5a-logs\") on node \"crc\" DevicePath \"\"" Oct 02 11:16:52 crc kubenswrapper[4783]: I1002 11:16:52.973839 4783 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" " Oct 02 11:16:52 crc kubenswrapper[4783]: I1002 11:16:52.973858 4783 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/cd779125-a75d-4045-a75c-e7ed1fc66d5a-httpd-run\") on node \"crc\" DevicePath \"\"" Oct 02 11:16:52 crc kubenswrapper[4783]: I1002 11:16:52.973871 4783 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cd779125-a75d-4045-a75c-e7ed1fc66d5a-config-data\") on node \"crc\" DevicePath \"\"" Oct 02 11:16:52 crc kubenswrapper[4783]: I1002 11:16:52.973881 4783 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cd779125-a75d-4045-a75c-e7ed1fc66d5a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 11:16:52 crc kubenswrapper[4783]: I1002 11:16:52.973918 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rbpn4\" (UniqueName: \"kubernetes.io/projected/cd779125-a75d-4045-a75c-e7ed1fc66d5a-kube-api-access-rbpn4\") on node \"crc\" DevicePath \"\"" Oct 02 11:16:52 crc kubenswrapper[4783]: I1002 11:16:52.973931 4783 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cd779125-a75d-4045-a75c-e7ed1fc66d5a-scripts\") on node \"crc\" DevicePath \"\"" Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.006559 4783 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.015057 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cd779125-a75d-4045-a75c-e7ed1fc66d5a-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "cd779125-a75d-4045-a75c-e7ed1fc66d5a" (UID: "cd779125-a75d-4045-a75c-e7ed1fc66d5a"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.076211 4783 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/cd779125-a75d-4045-a75c-e7ed1fc66d5a-public-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.076249 4783 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.368975 4783 generic.go:334] "Generic (PLEG): container finished" podID="8abc2e1e-de94-4880-8a75-0c7ee0a2cdba" containerID="9a79bf128cd748cac66115e7e4373946c6413ab941b85a301fa5e56d361363ac" exitCode=0 Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.369068 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5fcdf587dd-wvthh" event={"ID":"8abc2e1e-de94-4880-8a75-0c7ee0a2cdba","Type":"ContainerDied","Data":"9a79bf128cd748cac66115e7e4373946c6413ab941b85a301fa5e56d361363ac"} Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.369107 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5fcdf587dd-wvthh" event={"ID":"8abc2e1e-de94-4880-8a75-0c7ee0a2cdba","Type":"ContainerStarted","Data":"c82809b9c6eba0a8170866eb1d26bf56ad2341d1b01f79a18fbfd51af89b92ce"} Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.369128 4783 scope.go:117] "RemoveContainer" containerID="17dc5053f7eb1f95b75a44af44ad2579d939fa2adfc83af523f7bb53acc883e3" Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.382782 4783 generic.go:334] "Generic (PLEG): container finished" podID="48c11fb6-76f0-4028-a76f-6f67904bf3aa" containerID="eaaf5ac1ab8de4cd11862bbd9387b781b9c059890b204852bc3fb6e13f34a239" exitCode=137 Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.382868 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-567b57d86d-gv6fq" event={"ID":"48c11fb6-76f0-4028-a76f-6f67904bf3aa","Type":"ContainerDied","Data":"eaaf5ac1ab8de4cd11862bbd9387b781b9c059890b204852bc3fb6e13f34a239"} Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.408764 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"cd779125-a75d-4045-a75c-e7ed1fc66d5a","Type":"ContainerDied","Data":"8ac7d27c846dfc67805ffbfd60dfe18285b3a7a6e05f1361f2fb4aeb0abcd092"} Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.408934 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.421900 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"43fe06ee-67fa-4e7e-856c-e2fa72d07d65","Type":"ContainerStarted","Data":"2f6d5a5c872edf6b9e7c3fff87e1396ec3a9c7fabcebde18607f95844ac88a40"} Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.470822 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.485265 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.512482 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Oct 02 11:16:53 crc kubenswrapper[4783]: E1002 11:16:53.512980 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd779125-a75d-4045-a75c-e7ed1fc66d5a" containerName="glance-httpd" Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.513006 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd779125-a75d-4045-a75c-e7ed1fc66d5a" containerName="glance-httpd" Oct 02 11:16:53 crc kubenswrapper[4783]: E1002 11:16:53.513037 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cd779125-a75d-4045-a75c-e7ed1fc66d5a" containerName="glance-log" Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.513046 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="cd779125-a75d-4045-a75c-e7ed1fc66d5a" containerName="glance-log" Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.513289 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd779125-a75d-4045-a75c-e7ed1fc66d5a" containerName="glance-log" Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.513313 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="cd779125-a75d-4045-a75c-e7ed1fc66d5a" containerName="glance-httpd" Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.514485 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.520136 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.520139 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.581057 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd779125-a75d-4045-a75c-e7ed1fc66d5a" path="/var/lib/kubelet/pods/cd779125-a75d-4045-a75c-e7ed1fc66d5a/volumes" Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.583371 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.593712 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7efac855-15fd-4a91-bd78-b7ad296dd6b6-scripts\") pod \"glance-default-external-api-0\" (UID: \"7efac855-15fd-4a91-bd78-b7ad296dd6b6\") " pod="openstack/glance-default-external-api-0" Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.593788 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7efac855-15fd-4a91-bd78-b7ad296dd6b6-config-data\") pod \"glance-default-external-api-0\" (UID: \"7efac855-15fd-4a91-bd78-b7ad296dd6b6\") " pod="openstack/glance-default-external-api-0" Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.593819 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/7efac855-15fd-4a91-bd78-b7ad296dd6b6-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"7efac855-15fd-4a91-bd78-b7ad296dd6b6\") " pod="openstack/glance-default-external-api-0" Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.593843 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"7efac855-15fd-4a91-bd78-b7ad296dd6b6\") " pod="openstack/glance-default-external-api-0" Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.593912 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7efac855-15fd-4a91-bd78-b7ad296dd6b6-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"7efac855-15fd-4a91-bd78-b7ad296dd6b6\") " pod="openstack/glance-default-external-api-0" Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.593940 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7jr6h\" (UniqueName: \"kubernetes.io/projected/7efac855-15fd-4a91-bd78-b7ad296dd6b6-kube-api-access-7jr6h\") pod \"glance-default-external-api-0\" (UID: \"7efac855-15fd-4a91-bd78-b7ad296dd6b6\") " pod="openstack/glance-default-external-api-0" Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.593958 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7efac855-15fd-4a91-bd78-b7ad296dd6b6-public-tls-certs\") pod 
\"glance-default-external-api-0\" (UID: \"7efac855-15fd-4a91-bd78-b7ad296dd6b6\") " pod="openstack/glance-default-external-api-0" Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.593987 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7efac855-15fd-4a91-bd78-b7ad296dd6b6-logs\") pod \"glance-default-external-api-0\" (UID: \"7efac855-15fd-4a91-bd78-b7ad296dd6b6\") " pod="openstack/glance-default-external-api-0" Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.617034 4783 scope.go:117] "RemoveContainer" containerID="58153738b4583103c9b6b2a35fefcf7fa1e4c224a6092baa570c4337ad49f3cb" Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.695597 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/7efac855-15fd-4a91-bd78-b7ad296dd6b6-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"7efac855-15fd-4a91-bd78-b7ad296dd6b6\") " pod="openstack/glance-default-external-api-0" Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.695672 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"7efac855-15fd-4a91-bd78-b7ad296dd6b6\") " pod="openstack/glance-default-external-api-0" Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.695819 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7efac855-15fd-4a91-bd78-b7ad296dd6b6-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"7efac855-15fd-4a91-bd78-b7ad296dd6b6\") " pod="openstack/glance-default-external-api-0" Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.695863 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7jr6h\" (UniqueName: \"kubernetes.io/projected/7efac855-15fd-4a91-bd78-b7ad296dd6b6-kube-api-access-7jr6h\") pod \"glance-default-external-api-0\" (UID: \"7efac855-15fd-4a91-bd78-b7ad296dd6b6\") " pod="openstack/glance-default-external-api-0" Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.695893 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7efac855-15fd-4a91-bd78-b7ad296dd6b6-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"7efac855-15fd-4a91-bd78-b7ad296dd6b6\") " pod="openstack/glance-default-external-api-0" Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.695961 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7efac855-15fd-4a91-bd78-b7ad296dd6b6-logs\") pod \"glance-default-external-api-0\" (UID: \"7efac855-15fd-4a91-bd78-b7ad296dd6b6\") " pod="openstack/glance-default-external-api-0" Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.695991 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7efac855-15fd-4a91-bd78-b7ad296dd6b6-scripts\") pod \"glance-default-external-api-0\" (UID: \"7efac855-15fd-4a91-bd78-b7ad296dd6b6\") " pod="openstack/glance-default-external-api-0" Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.696070 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" 
(UniqueName: \"kubernetes.io/secret/7efac855-15fd-4a91-bd78-b7ad296dd6b6-config-data\") pod \"glance-default-external-api-0\" (UID: \"7efac855-15fd-4a91-bd78-b7ad296dd6b6\") " pod="openstack/glance-default-external-api-0" Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.696114 4783 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: \"7efac855-15fd-4a91-bd78-b7ad296dd6b6\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/glance-default-external-api-0" Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.696819 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/7efac855-15fd-4a91-bd78-b7ad296dd6b6-logs\") pod \"glance-default-external-api-0\" (UID: \"7efac855-15fd-4a91-bd78-b7ad296dd6b6\") " pod="openstack/glance-default-external-api-0" Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.697595 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/7efac855-15fd-4a91-bd78-b7ad296dd6b6-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"7efac855-15fd-4a91-bd78-b7ad296dd6b6\") " pod="openstack/glance-default-external-api-0" Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.703285 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7efac855-15fd-4a91-bd78-b7ad296dd6b6-config-data\") pod \"glance-default-external-api-0\" (UID: \"7efac855-15fd-4a91-bd78-b7ad296dd6b6\") " pod="openstack/glance-default-external-api-0" Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.703957 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/7efac855-15fd-4a91-bd78-b7ad296dd6b6-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"7efac855-15fd-4a91-bd78-b7ad296dd6b6\") " pod="openstack/glance-default-external-api-0" Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.706273 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7efac855-15fd-4a91-bd78-b7ad296dd6b6-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"7efac855-15fd-4a91-bd78-b7ad296dd6b6\") " pod="openstack/glance-default-external-api-0" Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.708012 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7efac855-15fd-4a91-bd78-b7ad296dd6b6-scripts\") pod \"glance-default-external-api-0\" (UID: \"7efac855-15fd-4a91-bd78-b7ad296dd6b6\") " pod="openstack/glance-default-external-api-0" Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.724405 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7jr6h\" (UniqueName: \"kubernetes.io/projected/7efac855-15fd-4a91-bd78-b7ad296dd6b6-kube-api-access-7jr6h\") pod \"glance-default-external-api-0\" (UID: \"7efac855-15fd-4a91-bd78-b7ad296dd6b6\") " pod="openstack/glance-default-external-api-0" Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.751177 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"glance-default-external-api-0\" (UID: 
\"7efac855-15fd-4a91-bd78-b7ad296dd6b6\") " pod="openstack/glance-default-external-api-0" Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.850477 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.884697 4783 scope.go:117] "RemoveContainer" containerID="c96f3388bdb9fb30ee9e91bfef63294eff4d35265da91c49c40a99af49e4f40c" Oct 02 11:16:53 crc kubenswrapper[4783]: I1002 11:16:53.930588 4783 scope.go:117] "RemoveContainer" containerID="4e6ac63eed2338754a176ea676e3a145a674a9566cbdddb5d20fd0d89861fee5" Oct 02 11:16:54 crc kubenswrapper[4783]: I1002 11:16:54.461256 4783 generic.go:334] "Generic (PLEG): container finished" podID="f16a661f-1590-4925-af88-6bc8deb8e227" containerID="c00e977c3a1c26702373235673c6e69c4dd95c2b8d7d12a98813e5327e360668" exitCode=0 Oct 02 11:16:54 crc kubenswrapper[4783]: I1002 11:16:54.461435 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gk7v9" event={"ID":"f16a661f-1590-4925-af88-6bc8deb8e227","Type":"ContainerDied","Data":"c00e977c3a1c26702373235673c6e69c4dd95c2b8d7d12a98813e5327e360668"} Oct 02 11:16:54 crc kubenswrapper[4783]: I1002 11:16:54.514067 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-567b57d86d-gv6fq" event={"ID":"48c11fb6-76f0-4028-a76f-6f67904bf3aa","Type":"ContainerStarted","Data":"76e35b57de4991d61267b7cf5435417f916259cc2da6cb259292b12054c98ddd"} Oct 02 11:16:54 crc kubenswrapper[4783]: I1002 11:16:54.593629 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-cm7cm"] Oct 02 11:16:54 crc kubenswrapper[4783]: I1002 11:16:54.595100 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-cm7cm" Oct 02 11:16:54 crc kubenswrapper[4783]: I1002 11:16:54.605805 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-cm7cm"] Oct 02 11:16:54 crc kubenswrapper[4783]: I1002 11:16:54.663714 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Oct 02 11:16:54 crc kubenswrapper[4783]: I1002 11:16:54.663762 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Oct 02 11:16:54 crc kubenswrapper[4783]: I1002 11:16:54.709152 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-vtkp6"] Oct 02 11:16:54 crc kubenswrapper[4783]: I1002 11:16:54.710309 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-db-create-vtkp6" Oct 02 11:16:54 crc kubenswrapper[4783]: I1002 11:16:54.721925 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c9rmr\" (UniqueName: \"kubernetes.io/projected/b7d8ce26-e566-4c5b-86be-55096f9346f1-kube-api-access-c9rmr\") pod \"nova-api-db-create-cm7cm\" (UID: \"b7d8ce26-e566-4c5b-86be-55096f9346f1\") " pod="openstack/nova-api-db-create-cm7cm" Oct 02 11:16:54 crc kubenswrapper[4783]: I1002 11:16:54.729332 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-vtkp6"] Oct 02 11:16:54 crc kubenswrapper[4783]: I1002 11:16:54.771098 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Oct 02 11:16:54 crc kubenswrapper[4783]: I1002 11:16:54.771901 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Oct 02 11:16:54 crc kubenswrapper[4783]: I1002 11:16:54.795264 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-v6ktl"] Oct 02 11:16:54 crc kubenswrapper[4783]: I1002 11:16:54.797389 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-v6ktl" Oct 02 11:16:54 crc kubenswrapper[4783]: I1002 11:16:54.823538 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c9rmr\" (UniqueName: \"kubernetes.io/projected/b7d8ce26-e566-4c5b-86be-55096f9346f1-kube-api-access-c9rmr\") pod \"nova-api-db-create-cm7cm\" (UID: \"b7d8ce26-e566-4c5b-86be-55096f9346f1\") " pod="openstack/nova-api-db-create-cm7cm" Oct 02 11:16:54 crc kubenswrapper[4783]: I1002 11:16:54.823664 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5jhns\" (UniqueName: \"kubernetes.io/projected/46f71a3d-452c-4ab3-a0e6-b3f318ab2cea-kube-api-access-5jhns\") pod \"nova-cell0-db-create-vtkp6\" (UID: \"46f71a3d-452c-4ab3-a0e6-b3f318ab2cea\") " pod="openstack/nova-cell0-db-create-vtkp6" Oct 02 11:16:54 crc kubenswrapper[4783]: I1002 11:16:54.823698 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2nkxv\" (UniqueName: \"kubernetes.io/projected/ba16e941-c6c8-42a9-86c8-49aa0af48a36-kube-api-access-2nkxv\") pod \"nova-cell1-db-create-v6ktl\" (UID: \"ba16e941-c6c8-42a9-86c8-49aa0af48a36\") " pod="openstack/nova-cell1-db-create-v6ktl" Oct 02 11:16:54 crc kubenswrapper[4783]: I1002 11:16:54.827182 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-v6ktl"] Oct 02 11:16:54 crc kubenswrapper[4783]: I1002 11:16:54.859828 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c9rmr\" (UniqueName: \"kubernetes.io/projected/b7d8ce26-e566-4c5b-86be-55096f9346f1-kube-api-access-c9rmr\") pod \"nova-api-db-create-cm7cm\" (UID: \"b7d8ce26-e566-4c5b-86be-55096f9346f1\") " pod="openstack/nova-api-db-create-cm7cm" Oct 02 11:16:54 crc kubenswrapper[4783]: I1002 11:16:54.867752 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Oct 02 11:16:54 crc kubenswrapper[4783]: I1002 11:16:54.919279 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-cm7cm" Oct 02 11:16:54 crc kubenswrapper[4783]: I1002 11:16:54.927486 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5jhns\" (UniqueName: \"kubernetes.io/projected/46f71a3d-452c-4ab3-a0e6-b3f318ab2cea-kube-api-access-5jhns\") pod \"nova-cell0-db-create-vtkp6\" (UID: \"46f71a3d-452c-4ab3-a0e6-b3f318ab2cea\") " pod="openstack/nova-cell0-db-create-vtkp6" Oct 02 11:16:54 crc kubenswrapper[4783]: I1002 11:16:54.927532 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2nkxv\" (UniqueName: \"kubernetes.io/projected/ba16e941-c6c8-42a9-86c8-49aa0af48a36-kube-api-access-2nkxv\") pod \"nova-cell1-db-create-v6ktl\" (UID: \"ba16e941-c6c8-42a9-86c8-49aa0af48a36\") " pod="openstack/nova-cell1-db-create-v6ktl" Oct 02 11:16:54 crc kubenswrapper[4783]: I1002 11:16:54.952592 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5jhns\" (UniqueName: \"kubernetes.io/projected/46f71a3d-452c-4ab3-a0e6-b3f318ab2cea-kube-api-access-5jhns\") pod \"nova-cell0-db-create-vtkp6\" (UID: \"46f71a3d-452c-4ab3-a0e6-b3f318ab2cea\") " pod="openstack/nova-cell0-db-create-vtkp6" Oct 02 11:16:54 crc kubenswrapper[4783]: I1002 11:16:54.973447 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2nkxv\" (UniqueName: \"kubernetes.io/projected/ba16e941-c6c8-42a9-86c8-49aa0af48a36-kube-api-access-2nkxv\") pod \"nova-cell1-db-create-v6ktl\" (UID: \"ba16e941-c6c8-42a9-86c8-49aa0af48a36\") " pod="openstack/nova-cell1-db-create-v6ktl" Oct 02 11:16:55 crc kubenswrapper[4783]: I1002 11:16:55.049559 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-vtkp6" Oct 02 11:16:55 crc kubenswrapper[4783]: I1002 11:16:55.132320 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-v6ktl" Oct 02 11:16:55 crc kubenswrapper[4783]: I1002 11:16:55.605516 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="43fe06ee-67fa-4e7e-856c-e2fa72d07d65" containerName="ceilometer-central-agent" containerID="cri-o://9ef1536e530b59f854722f92a7cc7e14a6032a2cf986c306b80b1c412b2694f7" gracePeriod=30 Oct 02 11:16:55 crc kubenswrapper[4783]: I1002 11:16:55.606201 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="43fe06ee-67fa-4e7e-856c-e2fa72d07d65" containerName="proxy-httpd" containerID="cri-o://76d79a438800492aa842f6cdcd7d15f3b51fbbc3946d8b9af2119eeeef4eb46f" gracePeriod=30 Oct 02 11:16:55 crc kubenswrapper[4783]: I1002 11:16:55.606266 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="43fe06ee-67fa-4e7e-856c-e2fa72d07d65" containerName="sg-core" containerID="cri-o://2f6d5a5c872edf6b9e7c3fff87e1396ec3a9c7fabcebde18607f95844ac88a40" gracePeriod=30 Oct 02 11:16:55 crc kubenswrapper[4783]: I1002 11:16:55.606308 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="43fe06ee-67fa-4e7e-856c-e2fa72d07d65" containerName="ceilometer-notification-agent" containerID="cri-o://7bf964470b3f53abdc76b2edea6ec464be7615c3152f21cc48b787820846a092" gracePeriod=30 Oct 02 11:16:55 crc kubenswrapper[4783]: I1002 11:16:55.682304 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.795330118 podStartE2EDuration="11.682275235s" podCreationTimestamp="2025-10-02 11:16:44 +0000 UTC" firstStartedPulling="2025-10-02 11:16:45.265689751 +0000 UTC m=+1438.581884022" lastFinishedPulling="2025-10-02 11:16:54.152634878 +0000 UTC m=+1447.468829139" observedRunningTime="2025-10-02 11:16:55.644039778 +0000 UTC m=+1448.960234039" watchObservedRunningTime="2025-10-02 11:16:55.682275235 +0000 UTC m=+1448.998469496" Oct 02 11:16:55 crc kubenswrapper[4783]: I1002 11:16:55.687016 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Oct 02 11:16:55 crc kubenswrapper[4783]: I1002 11:16:55.687056 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-cm7cm"] Oct 02 11:16:55 crc kubenswrapper[4783]: I1002 11:16:55.687074 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"43fe06ee-67fa-4e7e-856c-e2fa72d07d65","Type":"ContainerStarted","Data":"76d79a438800492aa842f6cdcd7d15f3b51fbbc3946d8b9af2119eeeef4eb46f"} Oct 02 11:16:55 crc kubenswrapper[4783]: I1002 11:16:55.687094 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Oct 02 11:16:55 crc kubenswrapper[4783]: I1002 11:16:55.687110 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0" Oct 02 11:16:55 crc kubenswrapper[4783]: I1002 11:16:55.687119 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"7efac855-15fd-4a91-bd78-b7ad296dd6b6","Type":"ContainerStarted","Data":"0c70a3e7bb47adbf2011b2a85c0c5cb5a43f26825f80f4215acdf72b28b897ff"} Oct 02 11:16:55 crc kubenswrapper[4783]: I1002 11:16:55.778874 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-vtkp6"] Oct 02 11:16:55 crc 
kubenswrapper[4783]: I1002 11:16:55.980264 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-v6ktl"] Oct 02 11:16:56 crc kubenswrapper[4783]: W1002 11:16:56.005929 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podba16e941_c6c8_42a9_86c8_49aa0af48a36.slice/crio-28cfdc1ad3f85d1a7523dcd49d6851fa7576aac118307d298f912b380094c7ec WatchSource:0}: Error finding container 28cfdc1ad3f85d1a7523dcd49d6851fa7576aac118307d298f912b380094c7ec: Status 404 returned error can't find the container with id 28cfdc1ad3f85d1a7523dcd49d6851fa7576aac118307d298f912b380094c7ec Oct 02 11:16:56 crc kubenswrapper[4783]: I1002 11:16:56.664910 4783 generic.go:334] "Generic (PLEG): container finished" podID="43fe06ee-67fa-4e7e-856c-e2fa72d07d65" containerID="76d79a438800492aa842f6cdcd7d15f3b51fbbc3946d8b9af2119eeeef4eb46f" exitCode=0 Oct 02 11:16:56 crc kubenswrapper[4783]: I1002 11:16:56.665616 4783 generic.go:334] "Generic (PLEG): container finished" podID="43fe06ee-67fa-4e7e-856c-e2fa72d07d65" containerID="2f6d5a5c872edf6b9e7c3fff87e1396ec3a9c7fabcebde18607f95844ac88a40" exitCode=2 Oct 02 11:16:56 crc kubenswrapper[4783]: I1002 11:16:56.665629 4783 generic.go:334] "Generic (PLEG): container finished" podID="43fe06ee-67fa-4e7e-856c-e2fa72d07d65" containerID="7bf964470b3f53abdc76b2edea6ec464be7615c3152f21cc48b787820846a092" exitCode=0 Oct 02 11:16:56 crc kubenswrapper[4783]: I1002 11:16:56.665561 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"43fe06ee-67fa-4e7e-856c-e2fa72d07d65","Type":"ContainerDied","Data":"76d79a438800492aa842f6cdcd7d15f3b51fbbc3946d8b9af2119eeeef4eb46f"} Oct 02 11:16:56 crc kubenswrapper[4783]: I1002 11:16:56.665735 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"43fe06ee-67fa-4e7e-856c-e2fa72d07d65","Type":"ContainerDied","Data":"2f6d5a5c872edf6b9e7c3fff87e1396ec3a9c7fabcebde18607f95844ac88a40"} Oct 02 11:16:56 crc kubenswrapper[4783]: I1002 11:16:56.665749 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"43fe06ee-67fa-4e7e-856c-e2fa72d07d65","Type":"ContainerDied","Data":"7bf964470b3f53abdc76b2edea6ec464be7615c3152f21cc48b787820846a092"} Oct 02 11:16:56 crc kubenswrapper[4783]: I1002 11:16:56.674016 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"7efac855-15fd-4a91-bd78-b7ad296dd6b6","Type":"ContainerStarted","Data":"8ca4893c9182b1e4ecb9a6ae6d056a5c99c0158f6f8fc6ad507eb87129b3c3d1"} Oct 02 11:16:56 crc kubenswrapper[4783]: I1002 11:16:56.687028 4783 generic.go:334] "Generic (PLEG): container finished" podID="46f71a3d-452c-4ab3-a0e6-b3f318ab2cea" containerID="b17948d991ca5496c7138e40dae05320a691e18c619214bd060780abf39dec80" exitCode=0 Oct 02 11:16:56 crc kubenswrapper[4783]: I1002 11:16:56.687088 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-vtkp6" event={"ID":"46f71a3d-452c-4ab3-a0e6-b3f318ab2cea","Type":"ContainerDied","Data":"b17948d991ca5496c7138e40dae05320a691e18c619214bd060780abf39dec80"} Oct 02 11:16:56 crc kubenswrapper[4783]: I1002 11:16:56.687113 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-vtkp6" event={"ID":"46f71a3d-452c-4ab3-a0e6-b3f318ab2cea","Type":"ContainerStarted","Data":"1e5327eeb0b3cab639965d24a0d2dd4b21a24c85f0732e05094d9d3b32b2690b"} Oct 02 11:16:56 crc 
Oct 02 11:16:56 crc kubenswrapper[4783]: I1002 11:16:56.689907 4783 generic.go:334] "Generic (PLEG): container finished" podID="b7d8ce26-e566-4c5b-86be-55096f9346f1" containerID="b6f802f6c9ce0a6f4eac792d704235d8eb80cddd292eead7a77e161b5a437961" exitCode=0
Oct 02 11:16:56 crc kubenswrapper[4783]: I1002 11:16:56.689959 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-cm7cm" event={"ID":"b7d8ce26-e566-4c5b-86be-55096f9346f1","Type":"ContainerDied","Data":"b6f802f6c9ce0a6f4eac792d704235d8eb80cddd292eead7a77e161b5a437961"}
Oct 02 11:16:56 crc kubenswrapper[4783]: I1002 11:16:56.689982 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-cm7cm" event={"ID":"b7d8ce26-e566-4c5b-86be-55096f9346f1","Type":"ContainerStarted","Data":"ba012f4c8c58b7ba671f645b2717ec6732d6a1a8d9f25c0ff97ca27b62b67905"}
Oct 02 11:16:56 crc kubenswrapper[4783]: I1002 11:16:56.692964 4783 generic.go:334] "Generic (PLEG): container finished" podID="ba16e941-c6c8-42a9-86c8-49aa0af48a36" containerID="62ae0881e9c33a7d3d13a85515153660452d1392dbc6513dc22a6366f2183d58" exitCode=0
Oct 02 11:16:56 crc kubenswrapper[4783]: I1002 11:16:56.693027 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-v6ktl" event={"ID":"ba16e941-c6c8-42a9-86c8-49aa0af48a36","Type":"ContainerDied","Data":"62ae0881e9c33a7d3d13a85515153660452d1392dbc6513dc22a6366f2183d58"}
Oct 02 11:16:56 crc kubenswrapper[4783]: I1002 11:16:56.693053 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-v6ktl" event={"ID":"ba16e941-c6c8-42a9-86c8-49aa0af48a36","Type":"ContainerStarted","Data":"28cfdc1ad3f85d1a7523dcd49d6851fa7576aac118307d298f912b380094c7ec"}
Oct 02 11:16:56 crc kubenswrapper[4783]: I1002 11:16:56.699848 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gk7v9" event={"ID":"f16a661f-1590-4925-af88-6bc8deb8e227","Type":"ContainerStarted","Data":"deb49821e76adeaab5b6fe0725d4c3497a49f304143890085cea573bf4f55ac7"}
Oct 02 11:16:56 crc kubenswrapper[4783]: I1002 11:16:56.755848 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-gk7v9" podStartSLOduration=3.066987303 podStartE2EDuration="15.755825748s" podCreationTimestamp="2025-10-02 11:16:41 +0000 UTC" firstStartedPulling="2025-10-02 11:16:43.173130098 +0000 UTC m=+1436.489324359" lastFinishedPulling="2025-10-02 11:16:55.861968533 +0000 UTC m=+1449.178162804" observedRunningTime="2025-10-02 11:16:56.752916449 +0000 UTC m=+1450.069110710" watchObservedRunningTime="2025-10-02 11:16:56.755825748 +0000 UTC m=+1450.072020009"
Oct 02 11:16:57 crc kubenswrapper[4783]: I1002 11:16:57.710834 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"7efac855-15fd-4a91-bd78-b7ad296dd6b6","Type":"ContainerStarted","Data":"c00b3a99be9e8415f4c85f9c8b2bf4cfcc21768d3154d11a607739b8a305d394"}
Oct 02 11:16:57 crc kubenswrapper[4783]: I1002 11:16:57.710878 4783 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Oct 02 11:16:57 crc kubenswrapper[4783]: I1002 11:16:57.711534 4783 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Oct 02 11:16:57 crc kubenswrapper[4783]: I1002 11:16:57.743873 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=4.74384741 podStartE2EDuration="4.74384741s" podCreationTimestamp="2025-10-02 11:16:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:16:57.734860544 +0000 UTC m=+1451.051054805" watchObservedRunningTime="2025-10-02 11:16:57.74384741 +0000 UTC m=+1451.060041671"
Oct 02 11:16:58 crc kubenswrapper[4783]: I1002 11:16:58.185594 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-cm7cm"
Oct 02 11:16:58 crc kubenswrapper[4783]: I1002 11:16:58.276926 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c9rmr\" (UniqueName: \"kubernetes.io/projected/b7d8ce26-e566-4c5b-86be-55096f9346f1-kube-api-access-c9rmr\") pod \"b7d8ce26-e566-4c5b-86be-55096f9346f1\" (UID: \"b7d8ce26-e566-4c5b-86be-55096f9346f1\") "
Oct 02 11:16:58 crc kubenswrapper[4783]: I1002 11:16:58.285920 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b7d8ce26-e566-4c5b-86be-55096f9346f1-kube-api-access-c9rmr" (OuterVolumeSpecName: "kube-api-access-c9rmr") pod "b7d8ce26-e566-4c5b-86be-55096f9346f1" (UID: "b7d8ce26-e566-4c5b-86be-55096f9346f1"). InnerVolumeSpecName "kube-api-access-c9rmr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 02 11:16:58 crc kubenswrapper[4783]: I1002 11:16:58.357110 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-v6ktl"
Oct 02 11:16:58 crc kubenswrapper[4783]: I1002 11:16:58.363157 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-vtkp6"
Oct 02 11:16:58 crc kubenswrapper[4783]: I1002 11:16:58.382129 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2nkxv\" (UniqueName: \"kubernetes.io/projected/ba16e941-c6c8-42a9-86c8-49aa0af48a36-kube-api-access-2nkxv\") pod \"ba16e941-c6c8-42a9-86c8-49aa0af48a36\" (UID: \"ba16e941-c6c8-42a9-86c8-49aa0af48a36\") "
Oct 02 11:16:58 crc kubenswrapper[4783]: I1002 11:16:58.382574 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c9rmr\" (UniqueName: \"kubernetes.io/projected/b7d8ce26-e566-4c5b-86be-55096f9346f1-kube-api-access-c9rmr\") on node \"crc\" DevicePath \"\""
Oct 02 11:16:58 crc kubenswrapper[4783]: I1002 11:16:58.387183 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ba16e941-c6c8-42a9-86c8-49aa0af48a36-kube-api-access-2nkxv" (OuterVolumeSpecName: "kube-api-access-2nkxv") pod "ba16e941-c6c8-42a9-86c8-49aa0af48a36" (UID: "ba16e941-c6c8-42a9-86c8-49aa0af48a36"). InnerVolumeSpecName "kube-api-access-2nkxv". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:16:58 crc kubenswrapper[4783]: I1002 11:16:58.484263 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5jhns\" (UniqueName: \"kubernetes.io/projected/46f71a3d-452c-4ab3-a0e6-b3f318ab2cea-kube-api-access-5jhns\") pod \"46f71a3d-452c-4ab3-a0e6-b3f318ab2cea\" (UID: \"46f71a3d-452c-4ab3-a0e6-b3f318ab2cea\") " Oct 02 11:16:58 crc kubenswrapper[4783]: I1002 11:16:58.484946 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2nkxv\" (UniqueName: \"kubernetes.io/projected/ba16e941-c6c8-42a9-86c8-49aa0af48a36-kube-api-access-2nkxv\") on node \"crc\" DevicePath \"\"" Oct 02 11:16:58 crc kubenswrapper[4783]: I1002 11:16:58.494200 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/46f71a3d-452c-4ab3-a0e6-b3f318ab2cea-kube-api-access-5jhns" (OuterVolumeSpecName: "kube-api-access-5jhns") pod "46f71a3d-452c-4ab3-a0e6-b3f318ab2cea" (UID: "46f71a3d-452c-4ab3-a0e6-b3f318ab2cea"). InnerVolumeSpecName "kube-api-access-5jhns". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:16:58 crc kubenswrapper[4783]: I1002 11:16:58.587013 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5jhns\" (UniqueName: \"kubernetes.io/projected/46f71a3d-452c-4ab3-a0e6-b3f318ab2cea-kube-api-access-5jhns\") on node \"crc\" DevicePath \"\"" Oct 02 11:16:58 crc kubenswrapper[4783]: I1002 11:16:58.722618 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-v6ktl" event={"ID":"ba16e941-c6c8-42a9-86c8-49aa0af48a36","Type":"ContainerDied","Data":"28cfdc1ad3f85d1a7523dcd49d6851fa7576aac118307d298f912b380094c7ec"} Oct 02 11:16:58 crc kubenswrapper[4783]: I1002 11:16:58.722663 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="28cfdc1ad3f85d1a7523dcd49d6851fa7576aac118307d298f912b380094c7ec" Oct 02 11:16:58 crc kubenswrapper[4783]: I1002 11:16:58.722726 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-v6ktl" Oct 02 11:16:58 crc kubenswrapper[4783]: I1002 11:16:58.731284 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-vtkp6" event={"ID":"46f71a3d-452c-4ab3-a0e6-b3f318ab2cea","Type":"ContainerDied","Data":"1e5327eeb0b3cab639965d24a0d2dd4b21a24c85f0732e05094d9d3b32b2690b"} Oct 02 11:16:58 crc kubenswrapper[4783]: I1002 11:16:58.731334 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1e5327eeb0b3cab639965d24a0d2dd4b21a24c85f0732e05094d9d3b32b2690b" Oct 02 11:16:58 crc kubenswrapper[4783]: I1002 11:16:58.731397 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-vtkp6" Oct 02 11:16:58 crc kubenswrapper[4783]: I1002 11:16:58.733933 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-cm7cm" event={"ID":"b7d8ce26-e566-4c5b-86be-55096f9346f1","Type":"ContainerDied","Data":"ba012f4c8c58b7ba671f645b2717ec6732d6a1a8d9f25c0ff97ca27b62b67905"} Oct 02 11:16:58 crc kubenswrapper[4783]: I1002 11:16:58.734065 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-db-create-cm7cm" Oct 02 11:16:58 crc kubenswrapper[4783]: I1002 11:16:58.737089 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ba012f4c8c58b7ba671f645b2717ec6732d6a1a8d9f25c0ff97ca27b62b67905" Oct 02 11:16:58 crc kubenswrapper[4783]: E1002 11:16:58.828959 4783 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod46f71a3d_452c_4ab3_a0e6_b3f318ab2cea.slice/crio-1e5327eeb0b3cab639965d24a0d2dd4b21a24c85f0732e05094d9d3b32b2690b\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod46f71a3d_452c_4ab3_a0e6_b3f318ab2cea.slice\": RecentStats: unable to find data in memory cache]" Oct 02 11:16:59 crc kubenswrapper[4783]: I1002 11:16:59.702756 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Oct 02 11:16:59 crc kubenswrapper[4783]: I1002 11:16:59.703249 4783 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Oct 02 11:16:59 crc kubenswrapper[4783]: I1002 11:16:59.713228 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0" Oct 02 11:17:01 crc kubenswrapper[4783]: I1002 11:17:01.613357 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-gk7v9" Oct 02 11:17:01 crc kubenswrapper[4783]: I1002 11:17:01.616175 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-gk7v9" Oct 02 11:17:01 crc kubenswrapper[4783]: I1002 11:17:01.670351 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-gk7v9" Oct 02 11:17:01 crc kubenswrapper[4783]: I1002 11:17:01.702398 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 02 11:17:01 crc kubenswrapper[4783]: I1002 11:17:01.793487 4783 generic.go:334] "Generic (PLEG): container finished" podID="43fe06ee-67fa-4e7e-856c-e2fa72d07d65" containerID="9ef1536e530b59f854722f92a7cc7e14a6032a2cf986c306b80b1c412b2694f7" exitCode=0 Oct 02 11:17:01 crc kubenswrapper[4783]: I1002 11:17:01.793601 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Oct 02 11:17:01 crc kubenswrapper[4783]: I1002 11:17:01.793592 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"43fe06ee-67fa-4e7e-856c-e2fa72d07d65","Type":"ContainerDied","Data":"9ef1536e530b59f854722f92a7cc7e14a6032a2cf986c306b80b1c412b2694f7"} Oct 02 11:17:01 crc kubenswrapper[4783]: I1002 11:17:01.793664 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"43fe06ee-67fa-4e7e-856c-e2fa72d07d65","Type":"ContainerDied","Data":"bc1277c6094efeacda1ed65f271fb3cfa8bae2400b67d0b9a417e75d7856e5f0"} Oct 02 11:17:01 crc kubenswrapper[4783]: I1002 11:17:01.793696 4783 scope.go:117] "RemoveContainer" containerID="76d79a438800492aa842f6cdcd7d15f3b51fbbc3946d8b9af2119eeeef4eb46f" Oct 02 11:17:01 crc kubenswrapper[4783]: I1002 11:17:01.816121 4783 scope.go:117] "RemoveContainer" containerID="2f6d5a5c872edf6b9e7c3fff87e1396ec3a9c7fabcebde18607f95844ac88a40" Oct 02 11:17:01 crc kubenswrapper[4783]: I1002 11:17:01.832501 4783 scope.go:117] "RemoveContainer" containerID="7bf964470b3f53abdc76b2edea6ec464be7615c3152f21cc48b787820846a092" Oct 02 11:17:01 crc kubenswrapper[4783]: I1002 11:17:01.832637 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-gk7v9" Oct 02 11:17:01 crc kubenswrapper[4783]: I1002 11:17:01.850273 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43fe06ee-67fa-4e7e-856c-e2fa72d07d65-combined-ca-bundle\") pod \"43fe06ee-67fa-4e7e-856c-e2fa72d07d65\" (UID: \"43fe06ee-67fa-4e7e-856c-e2fa72d07d65\") " Oct 02 11:17:01 crc kubenswrapper[4783]: I1002 11:17:01.850331 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kmw9m\" (UniqueName: \"kubernetes.io/projected/43fe06ee-67fa-4e7e-856c-e2fa72d07d65-kube-api-access-kmw9m\") pod \"43fe06ee-67fa-4e7e-856c-e2fa72d07d65\" (UID: \"43fe06ee-67fa-4e7e-856c-e2fa72d07d65\") " Oct 02 11:17:01 crc kubenswrapper[4783]: I1002 11:17:01.850372 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/43fe06ee-67fa-4e7e-856c-e2fa72d07d65-sg-core-conf-yaml\") pod \"43fe06ee-67fa-4e7e-856c-e2fa72d07d65\" (UID: \"43fe06ee-67fa-4e7e-856c-e2fa72d07d65\") " Oct 02 11:17:01 crc kubenswrapper[4783]: I1002 11:17:01.850532 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/43fe06ee-67fa-4e7e-856c-e2fa72d07d65-run-httpd\") pod \"43fe06ee-67fa-4e7e-856c-e2fa72d07d65\" (UID: \"43fe06ee-67fa-4e7e-856c-e2fa72d07d65\") " Oct 02 11:17:01 crc kubenswrapper[4783]: I1002 11:17:01.850559 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/43fe06ee-67fa-4e7e-856c-e2fa72d07d65-log-httpd\") pod \"43fe06ee-67fa-4e7e-856c-e2fa72d07d65\" (UID: \"43fe06ee-67fa-4e7e-856c-e2fa72d07d65\") " Oct 02 11:17:01 crc kubenswrapper[4783]: I1002 11:17:01.850692 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/43fe06ee-67fa-4e7e-856c-e2fa72d07d65-config-data\") pod \"43fe06ee-67fa-4e7e-856c-e2fa72d07d65\" (UID: \"43fe06ee-67fa-4e7e-856c-e2fa72d07d65\") " Oct 02 11:17:01 crc kubenswrapper[4783]: I1002 11:17:01.850716 
4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/43fe06ee-67fa-4e7e-856c-e2fa72d07d65-ceilometer-tls-certs\") pod \"43fe06ee-67fa-4e7e-856c-e2fa72d07d65\" (UID: \"43fe06ee-67fa-4e7e-856c-e2fa72d07d65\") " Oct 02 11:17:01 crc kubenswrapper[4783]: I1002 11:17:01.850732 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/43fe06ee-67fa-4e7e-856c-e2fa72d07d65-scripts\") pod \"43fe06ee-67fa-4e7e-856c-e2fa72d07d65\" (UID: \"43fe06ee-67fa-4e7e-856c-e2fa72d07d65\") " Oct 02 11:17:01 crc kubenswrapper[4783]: I1002 11:17:01.851077 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/43fe06ee-67fa-4e7e-856c-e2fa72d07d65-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "43fe06ee-67fa-4e7e-856c-e2fa72d07d65" (UID: "43fe06ee-67fa-4e7e-856c-e2fa72d07d65"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:17:01 crc kubenswrapper[4783]: I1002 11:17:01.856470 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/43fe06ee-67fa-4e7e-856c-e2fa72d07d65-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "43fe06ee-67fa-4e7e-856c-e2fa72d07d65" (UID: "43fe06ee-67fa-4e7e-856c-e2fa72d07d65"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:17:01 crc kubenswrapper[4783]: I1002 11:17:01.860862 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43fe06ee-67fa-4e7e-856c-e2fa72d07d65-scripts" (OuterVolumeSpecName: "scripts") pod "43fe06ee-67fa-4e7e-856c-e2fa72d07d65" (UID: "43fe06ee-67fa-4e7e-856c-e2fa72d07d65"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:17:01 crc kubenswrapper[4783]: I1002 11:17:01.862397 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43fe06ee-67fa-4e7e-856c-e2fa72d07d65-kube-api-access-kmw9m" (OuterVolumeSpecName: "kube-api-access-kmw9m") pod "43fe06ee-67fa-4e7e-856c-e2fa72d07d65" (UID: "43fe06ee-67fa-4e7e-856c-e2fa72d07d65"). InnerVolumeSpecName "kube-api-access-kmw9m". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:17:01 crc kubenswrapper[4783]: I1002 11:17:01.866095 4783 scope.go:117] "RemoveContainer" containerID="9ef1536e530b59f854722f92a7cc7e14a6032a2cf986c306b80b1c412b2694f7" Oct 02 11:17:01 crc kubenswrapper[4783]: I1002 11:17:01.915905 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-gk7v9"] Oct 02 11:17:01 crc kubenswrapper[4783]: I1002 11:17:01.934253 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43fe06ee-67fa-4e7e-856c-e2fa72d07d65-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "43fe06ee-67fa-4e7e-856c-e2fa72d07d65" (UID: "43fe06ee-67fa-4e7e-856c-e2fa72d07d65"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:17:01 crc kubenswrapper[4783]: I1002 11:17:01.953056 4783 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/43fe06ee-67fa-4e7e-856c-e2fa72d07d65-run-httpd\") on node \"crc\" DevicePath \"\"" Oct 02 11:17:01 crc kubenswrapper[4783]: I1002 11:17:01.953084 4783 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/43fe06ee-67fa-4e7e-856c-e2fa72d07d65-log-httpd\") on node \"crc\" DevicePath \"\"" Oct 02 11:17:01 crc kubenswrapper[4783]: I1002 11:17:01.953092 4783 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/43fe06ee-67fa-4e7e-856c-e2fa72d07d65-scripts\") on node \"crc\" DevicePath \"\"" Oct 02 11:17:01 crc kubenswrapper[4783]: I1002 11:17:01.953101 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kmw9m\" (UniqueName: \"kubernetes.io/projected/43fe06ee-67fa-4e7e-856c-e2fa72d07d65-kube-api-access-kmw9m\") on node \"crc\" DevicePath \"\"" Oct 02 11:17:01 crc kubenswrapper[4783]: I1002 11:17:01.953110 4783 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/43fe06ee-67fa-4e7e-856c-e2fa72d07d65-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Oct 02 11:17:01 crc kubenswrapper[4783]: I1002 11:17:01.969492 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43fe06ee-67fa-4e7e-856c-e2fa72d07d65-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "43fe06ee-67fa-4e7e-856c-e2fa72d07d65" (UID: "43fe06ee-67fa-4e7e-856c-e2fa72d07d65"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:17:01 crc kubenswrapper[4783]: I1002 11:17:01.978645 4783 scope.go:117] "RemoveContainer" containerID="76d79a438800492aa842f6cdcd7d15f3b51fbbc3946d8b9af2119eeeef4eb46f" Oct 02 11:17:01 crc kubenswrapper[4783]: E1002 11:17:01.981542 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"76d79a438800492aa842f6cdcd7d15f3b51fbbc3946d8b9af2119eeeef4eb46f\": container with ID starting with 76d79a438800492aa842f6cdcd7d15f3b51fbbc3946d8b9af2119eeeef4eb46f not found: ID does not exist" containerID="76d79a438800492aa842f6cdcd7d15f3b51fbbc3946d8b9af2119eeeef4eb46f" Oct 02 11:17:01 crc kubenswrapper[4783]: I1002 11:17:01.981599 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"76d79a438800492aa842f6cdcd7d15f3b51fbbc3946d8b9af2119eeeef4eb46f"} err="failed to get container status \"76d79a438800492aa842f6cdcd7d15f3b51fbbc3946d8b9af2119eeeef4eb46f\": rpc error: code = NotFound desc = could not find container \"76d79a438800492aa842f6cdcd7d15f3b51fbbc3946d8b9af2119eeeef4eb46f\": container with ID starting with 76d79a438800492aa842f6cdcd7d15f3b51fbbc3946d8b9af2119eeeef4eb46f not found: ID does not exist" Oct 02 11:17:01 crc kubenswrapper[4783]: I1002 11:17:01.981628 4783 scope.go:117] "RemoveContainer" containerID="2f6d5a5c872edf6b9e7c3fff87e1396ec3a9c7fabcebde18607f95844ac88a40" Oct 02 11:17:01 crc kubenswrapper[4783]: E1002 11:17:01.984507 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2f6d5a5c872edf6b9e7c3fff87e1396ec3a9c7fabcebde18607f95844ac88a40\": container with ID starting with 
2f6d5a5c872edf6b9e7c3fff87e1396ec3a9c7fabcebde18607f95844ac88a40 not found: ID does not exist" containerID="2f6d5a5c872edf6b9e7c3fff87e1396ec3a9c7fabcebde18607f95844ac88a40" Oct 02 11:17:01 crc kubenswrapper[4783]: I1002 11:17:01.984530 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2f6d5a5c872edf6b9e7c3fff87e1396ec3a9c7fabcebde18607f95844ac88a40"} err="failed to get container status \"2f6d5a5c872edf6b9e7c3fff87e1396ec3a9c7fabcebde18607f95844ac88a40\": rpc error: code = NotFound desc = could not find container \"2f6d5a5c872edf6b9e7c3fff87e1396ec3a9c7fabcebde18607f95844ac88a40\": container with ID starting with 2f6d5a5c872edf6b9e7c3fff87e1396ec3a9c7fabcebde18607f95844ac88a40 not found: ID does not exist" Oct 02 11:17:01 crc kubenswrapper[4783]: I1002 11:17:01.984548 4783 scope.go:117] "RemoveContainer" containerID="7bf964470b3f53abdc76b2edea6ec464be7615c3152f21cc48b787820846a092" Oct 02 11:17:01 crc kubenswrapper[4783]: E1002 11:17:01.988096 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7bf964470b3f53abdc76b2edea6ec464be7615c3152f21cc48b787820846a092\": container with ID starting with 7bf964470b3f53abdc76b2edea6ec464be7615c3152f21cc48b787820846a092 not found: ID does not exist" containerID="7bf964470b3f53abdc76b2edea6ec464be7615c3152f21cc48b787820846a092" Oct 02 11:17:01 crc kubenswrapper[4783]: I1002 11:17:01.988119 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7bf964470b3f53abdc76b2edea6ec464be7615c3152f21cc48b787820846a092"} err="failed to get container status \"7bf964470b3f53abdc76b2edea6ec464be7615c3152f21cc48b787820846a092\": rpc error: code = NotFound desc = could not find container \"7bf964470b3f53abdc76b2edea6ec464be7615c3152f21cc48b787820846a092\": container with ID starting with 7bf964470b3f53abdc76b2edea6ec464be7615c3152f21cc48b787820846a092 not found: ID does not exist" Oct 02 11:17:01 crc kubenswrapper[4783]: I1002 11:17:01.988131 4783 scope.go:117] "RemoveContainer" containerID="9ef1536e530b59f854722f92a7cc7e14a6032a2cf986c306b80b1c412b2694f7" Oct 02 11:17:01 crc kubenswrapper[4783]: E1002 11:17:01.988667 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9ef1536e530b59f854722f92a7cc7e14a6032a2cf986c306b80b1c412b2694f7\": container with ID starting with 9ef1536e530b59f854722f92a7cc7e14a6032a2cf986c306b80b1c412b2694f7 not found: ID does not exist" containerID="9ef1536e530b59f854722f92a7cc7e14a6032a2cf986c306b80b1c412b2694f7" Oct 02 11:17:01 crc kubenswrapper[4783]: I1002 11:17:01.988710 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9ef1536e530b59f854722f92a7cc7e14a6032a2cf986c306b80b1c412b2694f7"} err="failed to get container status \"9ef1536e530b59f854722f92a7cc7e14a6032a2cf986c306b80b1c412b2694f7\": rpc error: code = NotFound desc = could not find container \"9ef1536e530b59f854722f92a7cc7e14a6032a2cf986c306b80b1c412b2694f7\": container with ID starting with 9ef1536e530b59f854722f92a7cc7e14a6032a2cf986c306b80b1c412b2694f7 not found: ID does not exist" Oct 02 11:17:01 crc kubenswrapper[4783]: I1002 11:17:01.994522 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43fe06ee-67fa-4e7e-856c-e2fa72d07d65-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "43fe06ee-67fa-4e7e-856c-e2fa72d07d65" (UID: 
"43fe06ee-67fa-4e7e-856c-e2fa72d07d65"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.009667 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43fe06ee-67fa-4e7e-856c-e2fa72d07d65-config-data" (OuterVolumeSpecName: "config-data") pod "43fe06ee-67fa-4e7e-856c-e2fa72d07d65" (UID: "43fe06ee-67fa-4e7e-856c-e2fa72d07d65"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.054577 4783 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/43fe06ee-67fa-4e7e-856c-e2fa72d07d65-config-data\") on node \"crc\" DevicePath \"\"" Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.054613 4783 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/43fe06ee-67fa-4e7e-856c-e2fa72d07d65-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.054627 4783 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/43fe06ee-67fa-4e7e-856c-e2fa72d07d65-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.132853 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.141216 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.162956 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Oct 02 11:17:02 crc kubenswrapper[4783]: E1002 11:17:02.163290 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ba16e941-c6c8-42a9-86c8-49aa0af48a36" containerName="mariadb-database-create" Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.163305 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="ba16e941-c6c8-42a9-86c8-49aa0af48a36" containerName="mariadb-database-create" Oct 02 11:17:02 crc kubenswrapper[4783]: E1002 11:17:02.163320 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43fe06ee-67fa-4e7e-856c-e2fa72d07d65" containerName="sg-core" Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.163326 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="43fe06ee-67fa-4e7e-856c-e2fa72d07d65" containerName="sg-core" Oct 02 11:17:02 crc kubenswrapper[4783]: E1002 11:17:02.163339 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b7d8ce26-e566-4c5b-86be-55096f9346f1" containerName="mariadb-database-create" Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.163345 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="b7d8ce26-e566-4c5b-86be-55096f9346f1" containerName="mariadb-database-create" Oct 02 11:17:02 crc kubenswrapper[4783]: E1002 11:17:02.163358 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43fe06ee-67fa-4e7e-856c-e2fa72d07d65" containerName="proxy-httpd" Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.163363 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="43fe06ee-67fa-4e7e-856c-e2fa72d07d65" containerName="proxy-httpd" Oct 02 11:17:02 crc kubenswrapper[4783]: E1002 11:17:02.163372 4783 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="46f71a3d-452c-4ab3-a0e6-b3f318ab2cea" containerName="mariadb-database-create" Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.163377 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="46f71a3d-452c-4ab3-a0e6-b3f318ab2cea" containerName="mariadb-database-create" Oct 02 11:17:02 crc kubenswrapper[4783]: E1002 11:17:02.163390 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43fe06ee-67fa-4e7e-856c-e2fa72d07d65" containerName="ceilometer-notification-agent" Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.163396 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="43fe06ee-67fa-4e7e-856c-e2fa72d07d65" containerName="ceilometer-notification-agent" Oct 02 11:17:02 crc kubenswrapper[4783]: E1002 11:17:02.163427 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="43fe06ee-67fa-4e7e-856c-e2fa72d07d65" containerName="ceilometer-central-agent" Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.163433 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="43fe06ee-67fa-4e7e-856c-e2fa72d07d65" containerName="ceilometer-central-agent" Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.163584 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="43fe06ee-67fa-4e7e-856c-e2fa72d07d65" containerName="proxy-httpd" Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.163599 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="43fe06ee-67fa-4e7e-856c-e2fa72d07d65" containerName="ceilometer-notification-agent" Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.163610 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="43fe06ee-67fa-4e7e-856c-e2fa72d07d65" containerName="ceilometer-central-agent" Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.163625 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="46f71a3d-452c-4ab3-a0e6-b3f318ab2cea" containerName="mariadb-database-create" Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.163635 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="ba16e941-c6c8-42a9-86c8-49aa0af48a36" containerName="mariadb-database-create" Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.163641 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="43fe06ee-67fa-4e7e-856c-e2fa72d07d65" containerName="sg-core" Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.163649 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="b7d8ce26-e566-4c5b-86be-55096f9346f1" containerName="mariadb-database-create" Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.170319 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.173193 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.173620 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.180970 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.200003 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.257771 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0a63fe06-d57d-47a3-8a9f-0c62ebe1896b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0a63fe06-d57d-47a3-8a9f-0c62ebe1896b\") " pod="openstack/ceilometer-0" Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.258126 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a63fe06-d57d-47a3-8a9f-0c62ebe1896b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0a63fe06-d57d-47a3-8a9f-0c62ebe1896b\") " pod="openstack/ceilometer-0" Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.258187 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-twtzb\" (UniqueName: \"kubernetes.io/projected/0a63fe06-d57d-47a3-8a9f-0c62ebe1896b-kube-api-access-twtzb\") pod \"ceilometer-0\" (UID: \"0a63fe06-d57d-47a3-8a9f-0c62ebe1896b\") " pod="openstack/ceilometer-0" Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.258230 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a63fe06-d57d-47a3-8a9f-0c62ebe1896b-scripts\") pod \"ceilometer-0\" (UID: \"0a63fe06-d57d-47a3-8a9f-0c62ebe1896b\") " pod="openstack/ceilometer-0" Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.258274 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a63fe06-d57d-47a3-8a9f-0c62ebe1896b-config-data\") pod \"ceilometer-0\" (UID: \"0a63fe06-d57d-47a3-8a9f-0c62ebe1896b\") " pod="openstack/ceilometer-0" Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.258371 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0a63fe06-d57d-47a3-8a9f-0c62ebe1896b-log-httpd\") pod \"ceilometer-0\" (UID: \"0a63fe06-d57d-47a3-8a9f-0c62ebe1896b\") " pod="openstack/ceilometer-0" Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.258400 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0a63fe06-d57d-47a3-8a9f-0c62ebe1896b-run-httpd\") pod \"ceilometer-0\" (UID: \"0a63fe06-d57d-47a3-8a9f-0c62ebe1896b\") " pod="openstack/ceilometer-0" Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.258459 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/0a63fe06-d57d-47a3-8a9f-0c62ebe1896b-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"0a63fe06-d57d-47a3-8a9f-0c62ebe1896b\") " pod="openstack/ceilometer-0" Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.359390 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0a63fe06-d57d-47a3-8a9f-0c62ebe1896b-run-httpd\") pod \"ceilometer-0\" (UID: \"0a63fe06-d57d-47a3-8a9f-0c62ebe1896b\") " pod="openstack/ceilometer-0" Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.359494 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a63fe06-d57d-47a3-8a9f-0c62ebe1896b-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"0a63fe06-d57d-47a3-8a9f-0c62ebe1896b\") " pod="openstack/ceilometer-0" Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.359520 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0a63fe06-d57d-47a3-8a9f-0c62ebe1896b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0a63fe06-d57d-47a3-8a9f-0c62ebe1896b\") " pod="openstack/ceilometer-0" Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.359536 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a63fe06-d57d-47a3-8a9f-0c62ebe1896b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0a63fe06-d57d-47a3-8a9f-0c62ebe1896b\") " pod="openstack/ceilometer-0" Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.359566 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-twtzb\" (UniqueName: \"kubernetes.io/projected/0a63fe06-d57d-47a3-8a9f-0c62ebe1896b-kube-api-access-twtzb\") pod \"ceilometer-0\" (UID: \"0a63fe06-d57d-47a3-8a9f-0c62ebe1896b\") " pod="openstack/ceilometer-0" Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.359592 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a63fe06-d57d-47a3-8a9f-0c62ebe1896b-scripts\") pod \"ceilometer-0\" (UID: \"0a63fe06-d57d-47a3-8a9f-0c62ebe1896b\") " pod="openstack/ceilometer-0" Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.359620 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a63fe06-d57d-47a3-8a9f-0c62ebe1896b-config-data\") pod \"ceilometer-0\" (UID: \"0a63fe06-d57d-47a3-8a9f-0c62ebe1896b\") " pod="openstack/ceilometer-0" Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.359682 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0a63fe06-d57d-47a3-8a9f-0c62ebe1896b-log-httpd\") pod \"ceilometer-0\" (UID: \"0a63fe06-d57d-47a3-8a9f-0c62ebe1896b\") " pod="openstack/ceilometer-0" Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.360056 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0a63fe06-d57d-47a3-8a9f-0c62ebe1896b-log-httpd\") pod \"ceilometer-0\" (UID: \"0a63fe06-d57d-47a3-8a9f-0c62ebe1896b\") " pod="openstack/ceilometer-0" Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.360525 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: 
\"kubernetes.io/empty-dir/0a63fe06-d57d-47a3-8a9f-0c62ebe1896b-run-httpd\") pod \"ceilometer-0\" (UID: \"0a63fe06-d57d-47a3-8a9f-0c62ebe1896b\") " pod="openstack/ceilometer-0" Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.367001 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0a63fe06-d57d-47a3-8a9f-0c62ebe1896b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"0a63fe06-d57d-47a3-8a9f-0c62ebe1896b\") " pod="openstack/ceilometer-0" Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.372791 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a63fe06-d57d-47a3-8a9f-0c62ebe1896b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"0a63fe06-d57d-47a3-8a9f-0c62ebe1896b\") " pod="openstack/ceilometer-0" Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.379267 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a63fe06-d57d-47a3-8a9f-0c62ebe1896b-scripts\") pod \"ceilometer-0\" (UID: \"0a63fe06-d57d-47a3-8a9f-0c62ebe1896b\") " pod="openstack/ceilometer-0" Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.379969 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-twtzb\" (UniqueName: \"kubernetes.io/projected/0a63fe06-d57d-47a3-8a9f-0c62ebe1896b-kube-api-access-twtzb\") pod \"ceilometer-0\" (UID: \"0a63fe06-d57d-47a3-8a9f-0c62ebe1896b\") " pod="openstack/ceilometer-0" Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.381714 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a63fe06-d57d-47a3-8a9f-0c62ebe1896b-config-data\") pod \"ceilometer-0\" (UID: \"0a63fe06-d57d-47a3-8a9f-0c62ebe1896b\") " pod="openstack/ceilometer-0" Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.385463 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a63fe06-d57d-47a3-8a9f-0c62ebe1896b-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"0a63fe06-d57d-47a3-8a9f-0c62ebe1896b\") " pod="openstack/ceilometer-0" Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.486129 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.980762 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-5fcdf587dd-wvthh" Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.981139 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-5fcdf587dd-wvthh" Oct 02 11:17:02 crc kubenswrapper[4783]: I1002 11:17:02.982678 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-5fcdf587dd-wvthh" podUID="8abc2e1e-de94-4880-8a75-0c7ee0a2cdba" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.143:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.143:8443: connect: connection refused" Oct 02 11:17:03 crc kubenswrapper[4783]: I1002 11:17:03.023338 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 02 11:17:03 crc kubenswrapper[4783]: I1002 11:17:03.113676 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-567b57d86d-gv6fq" Oct 02 11:17:03 crc kubenswrapper[4783]: I1002 11:17:03.113726 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-567b57d86d-gv6fq" Oct 02 11:17:03 crc kubenswrapper[4783]: I1002 11:17:03.115558 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-567b57d86d-gv6fq" podUID="48c11fb6-76f0-4028-a76f-6f67904bf3aa" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.144:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.144:8443: connect: connection refused" Oct 02 11:17:03 crc kubenswrapper[4783]: I1002 11:17:03.571694 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43fe06ee-67fa-4e7e-856c-e2fa72d07d65" path="/var/lib/kubelet/pods/43fe06ee-67fa-4e7e-856c-e2fa72d07d65/volumes" Oct 02 11:17:03 crc kubenswrapper[4783]: I1002 11:17:03.817263 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0a63fe06-d57d-47a3-8a9f-0c62ebe1896b","Type":"ContainerStarted","Data":"bbcc133e16ebdb9095795c909f85a3d698b1a6921b154700d3be3d4800618ec8"} Oct 02 11:17:03 crc kubenswrapper[4783]: I1002 11:17:03.817385 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-gk7v9" podUID="f16a661f-1590-4925-af88-6bc8deb8e227" containerName="registry-server" containerID="cri-o://deb49821e76adeaab5b6fe0725d4c3497a49f304143890085cea573bf4f55ac7" gracePeriod=2 Oct 02 11:17:03 crc kubenswrapper[4783]: I1002 11:17:03.851084 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Oct 02 11:17:03 crc kubenswrapper[4783]: I1002 11:17:03.851950 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Oct 02 11:17:03 crc kubenswrapper[4783]: I1002 11:17:03.913762 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Oct 02 11:17:03 crc kubenswrapper[4783]: I1002 11:17:03.915968 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Oct 02 11:17:04 crc kubenswrapper[4783]: I1002 11:17:04.378587 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 02 11:17:04 crc kubenswrapper[4783]: I1002 11:17:04.767840 
4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-c6a7-account-create-8sr5h"] Oct 02 11:17:04 crc kubenswrapper[4783]: I1002 11:17:04.769285 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-c6a7-account-create-8sr5h" Oct 02 11:17:04 crc kubenswrapper[4783]: I1002 11:17:04.788092 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-c6a7-account-create-8sr5h"] Oct 02 11:17:04 crc kubenswrapper[4783]: I1002 11:17:04.789804 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Oct 02 11:17:04 crc kubenswrapper[4783]: I1002 11:17:04.845174 4783 generic.go:334] "Generic (PLEG): container finished" podID="f16a661f-1590-4925-af88-6bc8deb8e227" containerID="deb49821e76adeaab5b6fe0725d4c3497a49f304143890085cea573bf4f55ac7" exitCode=0 Oct 02 11:17:04 crc kubenswrapper[4783]: I1002 11:17:04.846413 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gk7v9" event={"ID":"f16a661f-1590-4925-af88-6bc8deb8e227","Type":"ContainerDied","Data":"deb49821e76adeaab5b6fe0725d4c3497a49f304143890085cea573bf4f55ac7"} Oct 02 11:17:04 crc kubenswrapper[4783]: I1002 11:17:04.846473 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Oct 02 11:17:04 crc kubenswrapper[4783]: I1002 11:17:04.846567 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Oct 02 11:17:04 crc kubenswrapper[4783]: I1002 11:17:04.907233 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6jgmk\" (UniqueName: \"kubernetes.io/projected/dec7f801-786b-472a-8e75-a4db5d89f15c-kube-api-access-6jgmk\") pod \"nova-api-c6a7-account-create-8sr5h\" (UID: \"dec7f801-786b-472a-8e75-a4db5d89f15c\") " pod="openstack/nova-api-c6a7-account-create-8sr5h" Oct 02 11:17:04 crc kubenswrapper[4783]: I1002 11:17:04.948040 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-7ce3-account-create-ng7bk"] Oct 02 11:17:04 crc kubenswrapper[4783]: I1002 11:17:04.949376 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-7ce3-account-create-ng7bk" Oct 02 11:17:04 crc kubenswrapper[4783]: I1002 11:17:04.956670 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Oct 02 11:17:04 crc kubenswrapper[4783]: I1002 11:17:04.963168 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-7ce3-account-create-ng7bk"] Oct 02 11:17:05 crc kubenswrapper[4783]: I1002 11:17:05.008559 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6jgmk\" (UniqueName: \"kubernetes.io/projected/dec7f801-786b-472a-8e75-a4db5d89f15c-kube-api-access-6jgmk\") pod \"nova-api-c6a7-account-create-8sr5h\" (UID: \"dec7f801-786b-472a-8e75-a4db5d89f15c\") " pod="openstack/nova-api-c6a7-account-create-8sr5h" Oct 02 11:17:05 crc kubenswrapper[4783]: I1002 11:17:05.035932 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6jgmk\" (UniqueName: \"kubernetes.io/projected/dec7f801-786b-472a-8e75-a4db5d89f15c-kube-api-access-6jgmk\") pod \"nova-api-c6a7-account-create-8sr5h\" (UID: \"dec7f801-786b-472a-8e75-a4db5d89f15c\") " pod="openstack/nova-api-c6a7-account-create-8sr5h" Oct 02 11:17:05 crc kubenswrapper[4783]: I1002 11:17:05.036913 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gk7v9" Oct 02 11:17:05 crc kubenswrapper[4783]: I1002 11:17:05.049337 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-26f6-account-create-hst2q"] Oct 02 11:17:05 crc kubenswrapper[4783]: E1002 11:17:05.053702 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f16a661f-1590-4925-af88-6bc8deb8e227" containerName="extract-utilities" Oct 02 11:17:05 crc kubenswrapper[4783]: I1002 11:17:05.053745 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="f16a661f-1590-4925-af88-6bc8deb8e227" containerName="extract-utilities" Oct 02 11:17:05 crc kubenswrapper[4783]: E1002 11:17:05.053759 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f16a661f-1590-4925-af88-6bc8deb8e227" containerName="extract-content" Oct 02 11:17:05 crc kubenswrapper[4783]: I1002 11:17:05.053767 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="f16a661f-1590-4925-af88-6bc8deb8e227" containerName="extract-content" Oct 02 11:17:05 crc kubenswrapper[4783]: E1002 11:17:05.053785 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f16a661f-1590-4925-af88-6bc8deb8e227" containerName="registry-server" Oct 02 11:17:05 crc kubenswrapper[4783]: I1002 11:17:05.053794 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="f16a661f-1590-4925-af88-6bc8deb8e227" containerName="registry-server" Oct 02 11:17:05 crc kubenswrapper[4783]: I1002 11:17:05.054102 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="f16a661f-1590-4925-af88-6bc8deb8e227" containerName="registry-server" Oct 02 11:17:05 crc kubenswrapper[4783]: I1002 11:17:05.059672 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-26f6-account-create-hst2q"] Oct 02 11:17:05 crc kubenswrapper[4783]: I1002 11:17:05.059803 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-26f6-account-create-hst2q" Oct 02 11:17:05 crc kubenswrapper[4783]: I1002 11:17:05.075620 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret" Oct 02 11:17:05 crc kubenswrapper[4783]: I1002 11:17:05.110047 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pvprb\" (UniqueName: \"kubernetes.io/projected/f16a661f-1590-4925-af88-6bc8deb8e227-kube-api-access-pvprb\") pod \"f16a661f-1590-4925-af88-6bc8deb8e227\" (UID: \"f16a661f-1590-4925-af88-6bc8deb8e227\") " Oct 02 11:17:05 crc kubenswrapper[4783]: I1002 11:17:05.110232 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f16a661f-1590-4925-af88-6bc8deb8e227-catalog-content\") pod \"f16a661f-1590-4925-af88-6bc8deb8e227\" (UID: \"f16a661f-1590-4925-af88-6bc8deb8e227\") " Oct 02 11:17:05 crc kubenswrapper[4783]: I1002 11:17:05.110329 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f16a661f-1590-4925-af88-6bc8deb8e227-utilities\") pod \"f16a661f-1590-4925-af88-6bc8deb8e227\" (UID: \"f16a661f-1590-4925-af88-6bc8deb8e227\") " Oct 02 11:17:05 crc kubenswrapper[4783]: I1002 11:17:05.111602 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f16a661f-1590-4925-af88-6bc8deb8e227-utilities" (OuterVolumeSpecName: "utilities") pod "f16a661f-1590-4925-af88-6bc8deb8e227" (UID: "f16a661f-1590-4925-af88-6bc8deb8e227"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:17:05 crc kubenswrapper[4783]: I1002 11:17:05.117351 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f16a661f-1590-4925-af88-6bc8deb8e227-kube-api-access-pvprb" (OuterVolumeSpecName: "kube-api-access-pvprb") pod "f16a661f-1590-4925-af88-6bc8deb8e227" (UID: "f16a661f-1590-4925-af88-6bc8deb8e227"). InnerVolumeSpecName "kube-api-access-pvprb". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:17:05 crc kubenswrapper[4783]: I1002 11:17:05.120764 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h9mt5\" (UniqueName: \"kubernetes.io/projected/154da498-2c29-4ee5-ba31-850329d73177-kube-api-access-h9mt5\") pod \"nova-cell0-7ce3-account-create-ng7bk\" (UID: \"154da498-2c29-4ee5-ba31-850329d73177\") " pod="openstack/nova-cell0-7ce3-account-create-ng7bk" Oct 02 11:17:05 crc kubenswrapper[4783]: I1002 11:17:05.121378 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pvprb\" (UniqueName: \"kubernetes.io/projected/f16a661f-1590-4925-af88-6bc8deb8e227-kube-api-access-pvprb\") on node \"crc\" DevicePath \"\"" Oct 02 11:17:05 crc kubenswrapper[4783]: I1002 11:17:05.121400 4783 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f16a661f-1590-4925-af88-6bc8deb8e227-utilities\") on node \"crc\" DevicePath \"\"" Oct 02 11:17:05 crc kubenswrapper[4783]: I1002 11:17:05.205492 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-c6a7-account-create-8sr5h" Oct 02 11:17:05 crc kubenswrapper[4783]: I1002 11:17:05.209263 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f16a661f-1590-4925-af88-6bc8deb8e227-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f16a661f-1590-4925-af88-6bc8deb8e227" (UID: "f16a661f-1590-4925-af88-6bc8deb8e227"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:17:05 crc kubenswrapper[4783]: I1002 11:17:05.223456 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xb5lq\" (UniqueName: \"kubernetes.io/projected/999c77ea-3263-4172-b950-3281d5f9034e-kube-api-access-xb5lq\") pod \"nova-cell1-26f6-account-create-hst2q\" (UID: \"999c77ea-3263-4172-b950-3281d5f9034e\") " pod="openstack/nova-cell1-26f6-account-create-hst2q" Oct 02 11:17:05 crc kubenswrapper[4783]: I1002 11:17:05.223652 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h9mt5\" (UniqueName: \"kubernetes.io/projected/154da498-2c29-4ee5-ba31-850329d73177-kube-api-access-h9mt5\") pod \"nova-cell0-7ce3-account-create-ng7bk\" (UID: \"154da498-2c29-4ee5-ba31-850329d73177\") " pod="openstack/nova-cell0-7ce3-account-create-ng7bk" Oct 02 11:17:05 crc kubenswrapper[4783]: I1002 11:17:05.223881 4783 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f16a661f-1590-4925-af88-6bc8deb8e227-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 02 11:17:05 crc kubenswrapper[4783]: I1002 11:17:05.249584 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h9mt5\" (UniqueName: \"kubernetes.io/projected/154da498-2c29-4ee5-ba31-850329d73177-kube-api-access-h9mt5\") pod \"nova-cell0-7ce3-account-create-ng7bk\" (UID: \"154da498-2c29-4ee5-ba31-850329d73177\") " pod="openstack/nova-cell0-7ce3-account-create-ng7bk" Oct 02 11:17:05 crc kubenswrapper[4783]: I1002 11:17:05.325161 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xb5lq\" (UniqueName: \"kubernetes.io/projected/999c77ea-3263-4172-b950-3281d5f9034e-kube-api-access-xb5lq\") pod \"nova-cell1-26f6-account-create-hst2q\" (UID: \"999c77ea-3263-4172-b950-3281d5f9034e\") " pod="openstack/nova-cell1-26f6-account-create-hst2q" Oct 02 11:17:05 crc kubenswrapper[4783]: I1002 11:17:05.346972 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xb5lq\" (UniqueName: \"kubernetes.io/projected/999c77ea-3263-4172-b950-3281d5f9034e-kube-api-access-xb5lq\") pod \"nova-cell1-26f6-account-create-hst2q\" (UID: \"999c77ea-3263-4172-b950-3281d5f9034e\") " pod="openstack/nova-cell1-26f6-account-create-hst2q" Oct 02 11:17:05 crc kubenswrapper[4783]: I1002 11:17:05.399184 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-7ce3-account-create-ng7bk" Oct 02 11:17:05 crc kubenswrapper[4783]: I1002 11:17:05.418968 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-26f6-account-create-hst2q" Oct 02 11:17:05 crc kubenswrapper[4783]: I1002 11:17:05.777987 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-c6a7-account-create-8sr5h"] Oct 02 11:17:05 crc kubenswrapper[4783]: I1002 11:17:05.859326 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0a63fe06-d57d-47a3-8a9f-0c62ebe1896b","Type":"ContainerStarted","Data":"9e9d84bbf32e7403dae90d6f270bd8a96354e185288afc8dc819ec916bb2853a"} Oct 02 11:17:05 crc kubenswrapper[4783]: I1002 11:17:05.862320 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-c6a7-account-create-8sr5h" event={"ID":"dec7f801-786b-472a-8e75-a4db5d89f15c","Type":"ContainerStarted","Data":"2ba8d3fd1eb3a825f9ce09e8f87b06923e7d1716ae53436d50bf504afe8c12b9"} Oct 02 11:17:05 crc kubenswrapper[4783]: I1002 11:17:05.867203 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gk7v9" Oct 02 11:17:05 crc kubenswrapper[4783]: I1002 11:17:05.867867 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gk7v9" event={"ID":"f16a661f-1590-4925-af88-6bc8deb8e227","Type":"ContainerDied","Data":"f50ca8d166a2b863f095a8b72a55939162ba786be1c167c1a05a886372505d61"} Oct 02 11:17:05 crc kubenswrapper[4783]: I1002 11:17:05.867924 4783 scope.go:117] "RemoveContainer" containerID="deb49821e76adeaab5b6fe0725d4c3497a49f304143890085cea573bf4f55ac7" Oct 02 11:17:05 crc kubenswrapper[4783]: I1002 11:17:05.905586 4783 scope.go:117] "RemoveContainer" containerID="c00e977c3a1c26702373235673c6e69c4dd95c2b8d7d12a98813e5327e360668" Oct 02 11:17:05 crc kubenswrapper[4783]: I1002 11:17:05.907129 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-gk7v9"] Oct 02 11:17:05 crc kubenswrapper[4783]: I1002 11:17:05.934564 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-gk7v9"] Oct 02 11:17:05 crc kubenswrapper[4783]: I1002 11:17:05.979903 4783 scope.go:117] "RemoveContainer" containerID="5b1c2a6aef6242eecb4daa12b3fdeb492af744841cf5588f0e97a67a366b65e1" Oct 02 11:17:06 crc kubenswrapper[4783]: I1002 11:17:06.009034 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-26f6-account-create-hst2q"] Oct 02 11:17:06 crc kubenswrapper[4783]: W1002 11:17:06.020512 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod999c77ea_3263_4172_b950_3281d5f9034e.slice/crio-eada4eb16704a51712796059d2192ac27dcb9e99b102ed442954c8fc158ab942 WatchSource:0}: Error finding container eada4eb16704a51712796059d2192ac27dcb9e99b102ed442954c8fc158ab942: Status 404 returned error can't find the container with id eada4eb16704a51712796059d2192ac27dcb9e99b102ed442954c8fc158ab942 Oct 02 11:17:06 crc kubenswrapper[4783]: I1002 11:17:06.276259 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-7ce3-account-create-ng7bk"] Oct 02 11:17:06 crc kubenswrapper[4783]: I1002 11:17:06.875853 4783 generic.go:334] "Generic (PLEG): container finished" podID="dec7f801-786b-472a-8e75-a4db5d89f15c" containerID="1f96f2c30875af924334afcd548cc8c070cb1a14de538bb3f8ecc81fc2682e47" exitCode=0 Oct 02 11:17:06 crc kubenswrapper[4783]: I1002 11:17:06.876227 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-api-c6a7-account-create-8sr5h" event={"ID":"dec7f801-786b-472a-8e75-a4db5d89f15c","Type":"ContainerDied","Data":"1f96f2c30875af924334afcd548cc8c070cb1a14de538bb3f8ecc81fc2682e47"} Oct 02 11:17:06 crc kubenswrapper[4783]: I1002 11:17:06.888566 4783 generic.go:334] "Generic (PLEG): container finished" podID="999c77ea-3263-4172-b950-3281d5f9034e" containerID="6a69d0ff96c3ed92e6fd8bafacb676d19d154b41bacc1d1dffa8937ddda457c0" exitCode=0 Oct 02 11:17:06 crc kubenswrapper[4783]: I1002 11:17:06.888642 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-26f6-account-create-hst2q" event={"ID":"999c77ea-3263-4172-b950-3281d5f9034e","Type":"ContainerDied","Data":"6a69d0ff96c3ed92e6fd8bafacb676d19d154b41bacc1d1dffa8937ddda457c0"} Oct 02 11:17:06 crc kubenswrapper[4783]: I1002 11:17:06.888669 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-26f6-account-create-hst2q" event={"ID":"999c77ea-3263-4172-b950-3281d5f9034e","Type":"ContainerStarted","Data":"eada4eb16704a51712796059d2192ac27dcb9e99b102ed442954c8fc158ab942"} Oct 02 11:17:06 crc kubenswrapper[4783]: I1002 11:17:06.892703 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0a63fe06-d57d-47a3-8a9f-0c62ebe1896b","Type":"ContainerStarted","Data":"282ba2977870ff8257593736694a70fcb76c80546acda0fc5c31e21fc1f9546d"} Oct 02 11:17:06 crc kubenswrapper[4783]: I1002 11:17:06.898111 4783 generic.go:334] "Generic (PLEG): container finished" podID="154da498-2c29-4ee5-ba31-850329d73177" containerID="7eec719596d0f4fcf8513c04b7b2b884a13e424d80fca34e1425e6395ab14877" exitCode=0 Oct 02 11:17:06 crc kubenswrapper[4783]: I1002 11:17:06.898462 4783 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Oct 02 11:17:06 crc kubenswrapper[4783]: I1002 11:17:06.898555 4783 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Oct 02 11:17:06 crc kubenswrapper[4783]: I1002 11:17:06.898702 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-7ce3-account-create-ng7bk" event={"ID":"154da498-2c29-4ee5-ba31-850329d73177","Type":"ContainerDied","Data":"7eec719596d0f4fcf8513c04b7b2b884a13e424d80fca34e1425e6395ab14877"} Oct 02 11:17:06 crc kubenswrapper[4783]: I1002 11:17:06.898731 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-7ce3-account-create-ng7bk" event={"ID":"154da498-2c29-4ee5-ba31-850329d73177","Type":"ContainerStarted","Data":"062c7e7df880083eb8e5450dceba927bcfcb2abda7f4ac9154d0a276b1634723"} Oct 02 11:17:07 crc kubenswrapper[4783]: I1002 11:17:07.598563 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f16a661f-1590-4925-af88-6bc8deb8e227" path="/var/lib/kubelet/pods/f16a661f-1590-4925-af88-6bc8deb8e227/volumes" Oct 02 11:17:07 crc kubenswrapper[4783]: I1002 11:17:07.909213 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0a63fe06-d57d-47a3-8a9f-0c62ebe1896b","Type":"ContainerStarted","Data":"c213eff031f1ffc4af6844723bba4c1936760673c986f3fadc3c309efce7eb73"} Oct 02 11:17:08 crc kubenswrapper[4783]: I1002 11:17:08.404640 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-c6a7-account-create-8sr5h" Oct 02 11:17:08 crc kubenswrapper[4783]: I1002 11:17:08.600772 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-26f6-account-create-hst2q" Oct 02 11:17:08 crc kubenswrapper[4783]: I1002 11:17:08.605129 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6jgmk\" (UniqueName: \"kubernetes.io/projected/dec7f801-786b-472a-8e75-a4db5d89f15c-kube-api-access-6jgmk\") pod \"dec7f801-786b-472a-8e75-a4db5d89f15c\" (UID: \"dec7f801-786b-472a-8e75-a4db5d89f15c\") " Oct 02 11:17:08 crc kubenswrapper[4783]: I1002 11:17:08.608476 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-7ce3-account-create-ng7bk" Oct 02 11:17:08 crc kubenswrapper[4783]: I1002 11:17:08.614606 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dec7f801-786b-472a-8e75-a4db5d89f15c-kube-api-access-6jgmk" (OuterVolumeSpecName: "kube-api-access-6jgmk") pod "dec7f801-786b-472a-8e75-a4db5d89f15c" (UID: "dec7f801-786b-472a-8e75-a4db5d89f15c"). InnerVolumeSpecName "kube-api-access-6jgmk". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:17:08 crc kubenswrapper[4783]: I1002 11:17:08.707573 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xb5lq\" (UniqueName: \"kubernetes.io/projected/999c77ea-3263-4172-b950-3281d5f9034e-kube-api-access-xb5lq\") pod \"999c77ea-3263-4172-b950-3281d5f9034e\" (UID: \"999c77ea-3263-4172-b950-3281d5f9034e\") " Oct 02 11:17:08 crc kubenswrapper[4783]: I1002 11:17:08.708204 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6jgmk\" (UniqueName: \"kubernetes.io/projected/dec7f801-786b-472a-8e75-a4db5d89f15c-kube-api-access-6jgmk\") on node \"crc\" DevicePath \"\"" Oct 02 11:17:08 crc kubenswrapper[4783]: I1002 11:17:08.714945 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/999c77ea-3263-4172-b950-3281d5f9034e-kube-api-access-xb5lq" (OuterVolumeSpecName: "kube-api-access-xb5lq") pod "999c77ea-3263-4172-b950-3281d5f9034e" (UID: "999c77ea-3263-4172-b950-3281d5f9034e"). InnerVolumeSpecName "kube-api-access-xb5lq". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:17:08 crc kubenswrapper[4783]: I1002 11:17:08.809957 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h9mt5\" (UniqueName: \"kubernetes.io/projected/154da498-2c29-4ee5-ba31-850329d73177-kube-api-access-h9mt5\") pod \"154da498-2c29-4ee5-ba31-850329d73177\" (UID: \"154da498-2c29-4ee5-ba31-850329d73177\") " Oct 02 11:17:08 crc kubenswrapper[4783]: I1002 11:17:08.810397 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xb5lq\" (UniqueName: \"kubernetes.io/projected/999c77ea-3263-4172-b950-3281d5f9034e-kube-api-access-xb5lq\") on node \"crc\" DevicePath \"\"" Oct 02 11:17:08 crc kubenswrapper[4783]: I1002 11:17:08.815638 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/154da498-2c29-4ee5-ba31-850329d73177-kube-api-access-h9mt5" (OuterVolumeSpecName: "kube-api-access-h9mt5") pod "154da498-2c29-4ee5-ba31-850329d73177" (UID: "154da498-2c29-4ee5-ba31-850329d73177"). InnerVolumeSpecName "kube-api-access-h9mt5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:17:08 crc kubenswrapper[4783]: I1002 11:17:08.915759 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h9mt5\" (UniqueName: \"kubernetes.io/projected/154da498-2c29-4ee5-ba31-850329d73177-kube-api-access-h9mt5\") on node \"crc\" DevicePath \"\"" Oct 02 11:17:08 crc kubenswrapper[4783]: I1002 11:17:08.920284 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0a63fe06-d57d-47a3-8a9f-0c62ebe1896b","Type":"ContainerStarted","Data":"1e428061ee909d2958920fbc72160ad7b89baf4dc5e6efb59f844a7844952e66"} Oct 02 11:17:08 crc kubenswrapper[4783]: I1002 11:17:08.920368 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0a63fe06-d57d-47a3-8a9f-0c62ebe1896b" containerName="ceilometer-central-agent" containerID="cri-o://9e9d84bbf32e7403dae90d6f270bd8a96354e185288afc8dc819ec916bb2853a" gracePeriod=30 Oct 02 11:17:08 crc kubenswrapper[4783]: I1002 11:17:08.920458 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Oct 02 11:17:08 crc kubenswrapper[4783]: I1002 11:17:08.920478 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0a63fe06-d57d-47a3-8a9f-0c62ebe1896b" containerName="sg-core" containerID="cri-o://c213eff031f1ffc4af6844723bba4c1936760673c986f3fadc3c309efce7eb73" gracePeriod=30 Oct 02 11:17:08 crc kubenswrapper[4783]: I1002 11:17:08.920467 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0a63fe06-d57d-47a3-8a9f-0c62ebe1896b" containerName="proxy-httpd" containerID="cri-o://1e428061ee909d2958920fbc72160ad7b89baf4dc5e6efb59f844a7844952e66" gracePeriod=30 Oct 02 11:17:08 crc kubenswrapper[4783]: I1002 11:17:08.920518 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="0a63fe06-d57d-47a3-8a9f-0c62ebe1896b" containerName="ceilometer-notification-agent" containerID="cri-o://282ba2977870ff8257593736694a70fcb76c80546acda0fc5c31e21fc1f9546d" gracePeriod=30 Oct 02 11:17:08 crc kubenswrapper[4783]: I1002 11:17:08.927270 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-7ce3-account-create-ng7bk" Oct 02 11:17:08 crc kubenswrapper[4783]: I1002 11:17:08.927038 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-7ce3-account-create-ng7bk" event={"ID":"154da498-2c29-4ee5-ba31-850329d73177","Type":"ContainerDied","Data":"062c7e7df880083eb8e5450dceba927bcfcb2abda7f4ac9154d0a276b1634723"} Oct 02 11:17:08 crc kubenswrapper[4783]: I1002 11:17:08.928659 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="062c7e7df880083eb8e5450dceba927bcfcb2abda7f4ac9154d0a276b1634723" Oct 02 11:17:08 crc kubenswrapper[4783]: I1002 11:17:08.946584 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-c6a7-account-create-8sr5h" event={"ID":"dec7f801-786b-472a-8e75-a4db5d89f15c","Type":"ContainerDied","Data":"2ba8d3fd1eb3a825f9ce09e8f87b06923e7d1716ae53436d50bf504afe8c12b9"} Oct 02 11:17:08 crc kubenswrapper[4783]: I1002 11:17:08.946634 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="2ba8d3fd1eb3a825f9ce09e8f87b06923e7d1716ae53436d50bf504afe8c12b9" Oct 02 11:17:08 crc kubenswrapper[4783]: I1002 11:17:08.946711 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-c6a7-account-create-8sr5h" Oct 02 11:17:08 crc kubenswrapper[4783]: I1002 11:17:08.949821 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.484189225 podStartE2EDuration="6.949805969s" podCreationTimestamp="2025-10-02 11:17:02 +0000 UTC" firstStartedPulling="2025-10-02 11:17:03.030510217 +0000 UTC m=+1456.346704478" lastFinishedPulling="2025-10-02 11:17:08.496126961 +0000 UTC m=+1461.812321222" observedRunningTime="2025-10-02 11:17:08.94181789 +0000 UTC m=+1462.258012151" watchObservedRunningTime="2025-10-02 11:17:08.949805969 +0000 UTC m=+1462.266000230" Oct 02 11:17:08 crc kubenswrapper[4783]: I1002 11:17:08.960217 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-26f6-account-create-hst2q" event={"ID":"999c77ea-3263-4172-b950-3281d5f9034e","Type":"ContainerDied","Data":"eada4eb16704a51712796059d2192ac27dcb9e99b102ed442954c8fc158ab942"} Oct 02 11:17:08 crc kubenswrapper[4783]: I1002 11:17:08.960252 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eada4eb16704a51712796059d2192ac27dcb9e99b102ed442954c8fc158ab942" Oct 02 11:17:08 crc kubenswrapper[4783]: I1002 11:17:08.960278 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-26f6-account-create-hst2q" Oct 02 11:17:09 crc kubenswrapper[4783]: E1002 11:17:09.152734 4783 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0a63fe06_d57d_47a3_8a9f_0c62ebe1896b.slice/crio-conmon-c213eff031f1ffc4af6844723bba4c1936760673c986f3fadc3c309efce7eb73.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0a63fe06_d57d_47a3_8a9f_0c62ebe1896b.slice/crio-c213eff031f1ffc4af6844723bba4c1936760673c986f3fadc3c309efce7eb73.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod154da498_2c29_4ee5_ba31_850329d73177.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod999c77ea_3263_4172_b950_3281d5f9034e.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddec7f801_786b_472a_8e75_a4db5d89f15c.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod154da498_2c29_4ee5_ba31_850329d73177.slice/crio-062c7e7df880083eb8e5450dceba927bcfcb2abda7f4ac9154d0a276b1634723\": RecentStats: unable to find data in memory cache]" Oct 02 11:17:09 crc kubenswrapper[4783]: I1002 11:17:09.917797 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Oct 02 11:17:09 crc kubenswrapper[4783]: I1002 11:17:09.918204 4783 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Oct 02 11:17:09 crc kubenswrapper[4783]: I1002 11:17:09.934060 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Oct 02 11:17:09 crc kubenswrapper[4783]: I1002 11:17:09.973541 4783 generic.go:334] "Generic (PLEG): container finished" podID="0a63fe06-d57d-47a3-8a9f-0c62ebe1896b" containerID="1e428061ee909d2958920fbc72160ad7b89baf4dc5e6efb59f844a7844952e66" exitCode=0 Oct 02 11:17:09 crc kubenswrapper[4783]: I1002 11:17:09.973578 4783 generic.go:334] "Generic (PLEG): container finished" podID="0a63fe06-d57d-47a3-8a9f-0c62ebe1896b" containerID="c213eff031f1ffc4af6844723bba4c1936760673c986f3fadc3c309efce7eb73" exitCode=2 Oct 02 11:17:09 crc kubenswrapper[4783]: I1002 11:17:09.973586 4783 generic.go:334] "Generic (PLEG): container finished" podID="0a63fe06-d57d-47a3-8a9f-0c62ebe1896b" containerID="282ba2977870ff8257593736694a70fcb76c80546acda0fc5c31e21fc1f9546d" exitCode=0 Oct 02 11:17:09 crc kubenswrapper[4783]: I1002 11:17:09.974375 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0a63fe06-d57d-47a3-8a9f-0c62ebe1896b","Type":"ContainerDied","Data":"1e428061ee909d2958920fbc72160ad7b89baf4dc5e6efb59f844a7844952e66"} Oct 02 11:17:09 crc kubenswrapper[4783]: I1002 11:17:09.974412 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0a63fe06-d57d-47a3-8a9f-0c62ebe1896b","Type":"ContainerDied","Data":"c213eff031f1ffc4af6844723bba4c1936760673c986f3fadc3c309efce7eb73"} Oct 02 11:17:09 crc kubenswrapper[4783]: I1002 11:17:09.974422 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"0a63fe06-d57d-47a3-8a9f-0c62ebe1896b","Type":"ContainerDied","Data":"282ba2977870ff8257593736694a70fcb76c80546acda0fc5c31e21fc1f9546d"} Oct 02 11:17:10 crc kubenswrapper[4783]: I1002 11:17:10.462806 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-tlclg"] Oct 02 11:17:10 crc kubenswrapper[4783]: E1002 11:17:10.463600 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="154da498-2c29-4ee5-ba31-850329d73177" containerName="mariadb-account-create" Oct 02 11:17:10 crc kubenswrapper[4783]: I1002 11:17:10.463691 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="154da498-2c29-4ee5-ba31-850329d73177" containerName="mariadb-account-create" Oct 02 11:17:10 crc kubenswrapper[4783]: E1002 11:17:10.463805 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="999c77ea-3263-4172-b950-3281d5f9034e" containerName="mariadb-account-create" Oct 02 11:17:10 crc kubenswrapper[4783]: I1002 11:17:10.463869 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="999c77ea-3263-4172-b950-3281d5f9034e" containerName="mariadb-account-create" Oct 02 11:17:10 crc kubenswrapper[4783]: E1002 11:17:10.463933 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dec7f801-786b-472a-8e75-a4db5d89f15c" containerName="mariadb-account-create" Oct 02 11:17:10 crc kubenswrapper[4783]: I1002 11:17:10.463988 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="dec7f801-786b-472a-8e75-a4db5d89f15c" containerName="mariadb-account-create" Oct 02 11:17:10 crc kubenswrapper[4783]: I1002 11:17:10.464205 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="dec7f801-786b-472a-8e75-a4db5d89f15c" containerName="mariadb-account-create" Oct 02 11:17:10 crc kubenswrapper[4783]: I1002 11:17:10.464289 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="154da498-2c29-4ee5-ba31-850329d73177" containerName="mariadb-account-create" Oct 02 11:17:10 crc kubenswrapper[4783]: I1002 11:17:10.464346 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="999c77ea-3263-4172-b950-3281d5f9034e" containerName="mariadb-account-create" Oct 02 11:17:10 crc kubenswrapper[4783]: I1002 11:17:10.464987 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-tlclg" Oct 02 11:17:10 crc kubenswrapper[4783]: I1002 11:17:10.472538 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-xmlzl" Oct 02 11:17:10 crc kubenswrapper[4783]: I1002 11:17:10.472733 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Oct 02 11:17:10 crc kubenswrapper[4783]: I1002 11:17:10.472939 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Oct 02 11:17:10 crc kubenswrapper[4783]: I1002 11:17:10.476091 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-tlclg"] Oct 02 11:17:10 crc kubenswrapper[4783]: I1002 11:17:10.648988 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fba98d7a-3a8d-43dd-b494-a7769328b96e-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-tlclg\" (UID: \"fba98d7a-3a8d-43dd-b494-a7769328b96e\") " pod="openstack/nova-cell0-conductor-db-sync-tlclg" Oct 02 11:17:10 crc kubenswrapper[4783]: I1002 11:17:10.649053 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fba98d7a-3a8d-43dd-b494-a7769328b96e-scripts\") pod \"nova-cell0-conductor-db-sync-tlclg\" (UID: \"fba98d7a-3a8d-43dd-b494-a7769328b96e\") " pod="openstack/nova-cell0-conductor-db-sync-tlclg" Oct 02 11:17:10 crc kubenswrapper[4783]: I1002 11:17:10.649108 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fba98d7a-3a8d-43dd-b494-a7769328b96e-config-data\") pod \"nova-cell0-conductor-db-sync-tlclg\" (UID: \"fba98d7a-3a8d-43dd-b494-a7769328b96e\") " pod="openstack/nova-cell0-conductor-db-sync-tlclg" Oct 02 11:17:10 crc kubenswrapper[4783]: I1002 11:17:10.649270 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bgczz\" (UniqueName: \"kubernetes.io/projected/fba98d7a-3a8d-43dd-b494-a7769328b96e-kube-api-access-bgczz\") pod \"nova-cell0-conductor-db-sync-tlclg\" (UID: \"fba98d7a-3a8d-43dd-b494-a7769328b96e\") " pod="openstack/nova-cell0-conductor-db-sync-tlclg" Oct 02 11:17:10 crc kubenswrapper[4783]: I1002 11:17:10.751037 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bgczz\" (UniqueName: \"kubernetes.io/projected/fba98d7a-3a8d-43dd-b494-a7769328b96e-kube-api-access-bgczz\") pod \"nova-cell0-conductor-db-sync-tlclg\" (UID: \"fba98d7a-3a8d-43dd-b494-a7769328b96e\") " pod="openstack/nova-cell0-conductor-db-sync-tlclg" Oct 02 11:17:10 crc kubenswrapper[4783]: I1002 11:17:10.751092 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fba98d7a-3a8d-43dd-b494-a7769328b96e-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-tlclg\" (UID: \"fba98d7a-3a8d-43dd-b494-a7769328b96e\") " pod="openstack/nova-cell0-conductor-db-sync-tlclg" Oct 02 11:17:10 crc kubenswrapper[4783]: I1002 11:17:10.751116 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fba98d7a-3a8d-43dd-b494-a7769328b96e-scripts\") pod \"nova-cell0-conductor-db-sync-tlclg\" (UID: 
\"fba98d7a-3a8d-43dd-b494-a7769328b96e\") " pod="openstack/nova-cell0-conductor-db-sync-tlclg" Oct 02 11:17:10 crc kubenswrapper[4783]: I1002 11:17:10.751140 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fba98d7a-3a8d-43dd-b494-a7769328b96e-config-data\") pod \"nova-cell0-conductor-db-sync-tlclg\" (UID: \"fba98d7a-3a8d-43dd-b494-a7769328b96e\") " pod="openstack/nova-cell0-conductor-db-sync-tlclg" Oct 02 11:17:10 crc kubenswrapper[4783]: I1002 11:17:10.759074 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fba98d7a-3a8d-43dd-b494-a7769328b96e-scripts\") pod \"nova-cell0-conductor-db-sync-tlclg\" (UID: \"fba98d7a-3a8d-43dd-b494-a7769328b96e\") " pod="openstack/nova-cell0-conductor-db-sync-tlclg" Oct 02 11:17:10 crc kubenswrapper[4783]: I1002 11:17:10.759390 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fba98d7a-3a8d-43dd-b494-a7769328b96e-config-data\") pod \"nova-cell0-conductor-db-sync-tlclg\" (UID: \"fba98d7a-3a8d-43dd-b494-a7769328b96e\") " pod="openstack/nova-cell0-conductor-db-sync-tlclg" Oct 02 11:17:10 crc kubenswrapper[4783]: I1002 11:17:10.784750 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fba98d7a-3a8d-43dd-b494-a7769328b96e-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-tlclg\" (UID: \"fba98d7a-3a8d-43dd-b494-a7769328b96e\") " pod="openstack/nova-cell0-conductor-db-sync-tlclg" Oct 02 11:17:10 crc kubenswrapper[4783]: I1002 11:17:10.789485 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bgczz\" (UniqueName: \"kubernetes.io/projected/fba98d7a-3a8d-43dd-b494-a7769328b96e-kube-api-access-bgczz\") pod \"nova-cell0-conductor-db-sync-tlclg\" (UID: \"fba98d7a-3a8d-43dd-b494-a7769328b96e\") " pod="openstack/nova-cell0-conductor-db-sync-tlclg" Oct 02 11:17:10 crc kubenswrapper[4783]: I1002 11:17:10.796684 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-tlclg" Oct 02 11:17:11 crc kubenswrapper[4783]: I1002 11:17:11.371592 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-tlclg"] Oct 02 11:17:12 crc kubenswrapper[4783]: I1002 11:17:12.031579 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-tlclg" event={"ID":"fba98d7a-3a8d-43dd-b494-a7769328b96e","Type":"ContainerStarted","Data":"c776567d959822059333cc0b9305fa014f36511fdb927830896570bb1ab00341"} Oct 02 11:17:12 crc kubenswrapper[4783]: I1002 11:17:12.980659 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-5fcdf587dd-wvthh" podUID="8abc2e1e-de94-4880-8a75-0c7ee0a2cdba" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.143:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.143:8443: connect: connection refused" Oct 02 11:17:13 crc kubenswrapper[4783]: I1002 11:17:13.114115 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-567b57d86d-gv6fq" podUID="48c11fb6-76f0-4028-a76f-6f67904bf3aa" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.144:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.144:8443: connect: connection refused" Oct 02 11:17:14 crc kubenswrapper[4783]: I1002 11:17:14.406432 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-6rdg6"] Oct 02 11:17:14 crc kubenswrapper[4783]: I1002 11:17:14.409672 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-6rdg6" Oct 02 11:17:14 crc kubenswrapper[4783]: I1002 11:17:14.419815 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-6rdg6"] Oct 02 11:17:14 crc kubenswrapper[4783]: I1002 11:17:14.523728 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/733b95bc-bf98-4482-ade1-ebabbbcc6cf1-utilities\") pod \"certified-operators-6rdg6\" (UID: \"733b95bc-bf98-4482-ade1-ebabbbcc6cf1\") " pod="openshift-marketplace/certified-operators-6rdg6" Oct 02 11:17:14 crc kubenswrapper[4783]: I1002 11:17:14.523859 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/733b95bc-bf98-4482-ade1-ebabbbcc6cf1-catalog-content\") pod \"certified-operators-6rdg6\" (UID: \"733b95bc-bf98-4482-ade1-ebabbbcc6cf1\") " pod="openshift-marketplace/certified-operators-6rdg6" Oct 02 11:17:14 crc kubenswrapper[4783]: I1002 11:17:14.523952 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bd5mj\" (UniqueName: \"kubernetes.io/projected/733b95bc-bf98-4482-ade1-ebabbbcc6cf1-kube-api-access-bd5mj\") pod \"certified-operators-6rdg6\" (UID: \"733b95bc-bf98-4482-ade1-ebabbbcc6cf1\") " pod="openshift-marketplace/certified-operators-6rdg6" Oct 02 11:17:14 crc kubenswrapper[4783]: I1002 11:17:14.627261 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bd5mj\" (UniqueName: \"kubernetes.io/projected/733b95bc-bf98-4482-ade1-ebabbbcc6cf1-kube-api-access-bd5mj\") pod \"certified-operators-6rdg6\" (UID: \"733b95bc-bf98-4482-ade1-ebabbbcc6cf1\") " pod="openshift-marketplace/certified-operators-6rdg6" Oct 02 
11:17:14 crc kubenswrapper[4783]: I1002 11:17:14.627373 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/733b95bc-bf98-4482-ade1-ebabbbcc6cf1-utilities\") pod \"certified-operators-6rdg6\" (UID: \"733b95bc-bf98-4482-ade1-ebabbbcc6cf1\") " pod="openshift-marketplace/certified-operators-6rdg6" Oct 02 11:17:14 crc kubenswrapper[4783]: I1002 11:17:14.627545 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/733b95bc-bf98-4482-ade1-ebabbbcc6cf1-catalog-content\") pod \"certified-operators-6rdg6\" (UID: \"733b95bc-bf98-4482-ade1-ebabbbcc6cf1\") " pod="openshift-marketplace/certified-operators-6rdg6" Oct 02 11:17:14 crc kubenswrapper[4783]: I1002 11:17:14.632968 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/733b95bc-bf98-4482-ade1-ebabbbcc6cf1-catalog-content\") pod \"certified-operators-6rdg6\" (UID: \"733b95bc-bf98-4482-ade1-ebabbbcc6cf1\") " pod="openshift-marketplace/certified-operators-6rdg6" Oct 02 11:17:14 crc kubenswrapper[4783]: I1002 11:17:14.633191 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/733b95bc-bf98-4482-ade1-ebabbbcc6cf1-utilities\") pod \"certified-operators-6rdg6\" (UID: \"733b95bc-bf98-4482-ade1-ebabbbcc6cf1\") " pod="openshift-marketplace/certified-operators-6rdg6" Oct 02 11:17:14 crc kubenswrapper[4783]: I1002 11:17:14.648519 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bd5mj\" (UniqueName: \"kubernetes.io/projected/733b95bc-bf98-4482-ade1-ebabbbcc6cf1-kube-api-access-bd5mj\") pod \"certified-operators-6rdg6\" (UID: \"733b95bc-bf98-4482-ade1-ebabbbcc6cf1\") " pod="openshift-marketplace/certified-operators-6rdg6" Oct 02 11:17:14 crc kubenswrapper[4783]: I1002 11:17:14.756855 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-6rdg6" Oct 02 11:17:15 crc kubenswrapper[4783]: I1002 11:17:15.398284 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-6rdg6"] Oct 02 11:17:19 crc kubenswrapper[4783]: I1002 11:17:19.148196 4783 generic.go:334] "Generic (PLEG): container finished" podID="0a63fe06-d57d-47a3-8a9f-0c62ebe1896b" containerID="9e9d84bbf32e7403dae90d6f270bd8a96354e185288afc8dc819ec916bb2853a" exitCode=0 Oct 02 11:17:19 crc kubenswrapper[4783]: I1002 11:17:19.148451 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0a63fe06-d57d-47a3-8a9f-0c62ebe1896b","Type":"ContainerDied","Data":"9e9d84bbf32e7403dae90d6f270bd8a96354e185288afc8dc819ec916bb2853a"} Oct 02 11:17:22 crc kubenswrapper[4783]: I1002 11:17:22.190351 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6rdg6" event={"ID":"733b95bc-bf98-4482-ade1-ebabbbcc6cf1","Type":"ContainerStarted","Data":"22376b770af2f1b2211cbbc7e2d9422aa4196003ce5e11e39aca58ebeb39a2b1"} Oct 02 11:17:22 crc kubenswrapper[4783]: I1002 11:17:22.421784 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Oct 02 11:17:22 crc kubenswrapper[4783]: I1002 11:17:22.513291 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0a63fe06-d57d-47a3-8a9f-0c62ebe1896b-sg-core-conf-yaml\") pod \"0a63fe06-d57d-47a3-8a9f-0c62ebe1896b\" (UID: \"0a63fe06-d57d-47a3-8a9f-0c62ebe1896b\") " Oct 02 11:17:22 crc kubenswrapper[4783]: I1002 11:17:22.513613 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a63fe06-d57d-47a3-8a9f-0c62ebe1896b-combined-ca-bundle\") pod \"0a63fe06-d57d-47a3-8a9f-0c62ebe1896b\" (UID: \"0a63fe06-d57d-47a3-8a9f-0c62ebe1896b\") " Oct 02 11:17:22 crc kubenswrapper[4783]: I1002 11:17:22.513662 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-twtzb\" (UniqueName: \"kubernetes.io/projected/0a63fe06-d57d-47a3-8a9f-0c62ebe1896b-kube-api-access-twtzb\") pod \"0a63fe06-d57d-47a3-8a9f-0c62ebe1896b\" (UID: \"0a63fe06-d57d-47a3-8a9f-0c62ebe1896b\") " Oct 02 11:17:22 crc kubenswrapper[4783]: I1002 11:17:22.513702 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a63fe06-d57d-47a3-8a9f-0c62ebe1896b-config-data\") pod \"0a63fe06-d57d-47a3-8a9f-0c62ebe1896b\" (UID: \"0a63fe06-d57d-47a3-8a9f-0c62ebe1896b\") " Oct 02 11:17:22 crc kubenswrapper[4783]: I1002 11:17:22.513721 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0a63fe06-d57d-47a3-8a9f-0c62ebe1896b-log-httpd\") pod \"0a63fe06-d57d-47a3-8a9f-0c62ebe1896b\" (UID: \"0a63fe06-d57d-47a3-8a9f-0c62ebe1896b\") " Oct 02 11:17:22 crc kubenswrapper[4783]: I1002 11:17:22.513780 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a63fe06-d57d-47a3-8a9f-0c62ebe1896b-ceilometer-tls-certs\") pod \"0a63fe06-d57d-47a3-8a9f-0c62ebe1896b\" (UID: \"0a63fe06-d57d-47a3-8a9f-0c62ebe1896b\") " Oct 02 11:17:22 crc kubenswrapper[4783]: I1002 11:17:22.513838 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a63fe06-d57d-47a3-8a9f-0c62ebe1896b-scripts\") pod \"0a63fe06-d57d-47a3-8a9f-0c62ebe1896b\" (UID: \"0a63fe06-d57d-47a3-8a9f-0c62ebe1896b\") " Oct 02 11:17:22 crc kubenswrapper[4783]: I1002 11:17:22.513885 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0a63fe06-d57d-47a3-8a9f-0c62ebe1896b-run-httpd\") pod \"0a63fe06-d57d-47a3-8a9f-0c62ebe1896b\" (UID: \"0a63fe06-d57d-47a3-8a9f-0c62ebe1896b\") " Oct 02 11:17:22 crc kubenswrapper[4783]: I1002 11:17:22.514906 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0a63fe06-d57d-47a3-8a9f-0c62ebe1896b-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "0a63fe06-d57d-47a3-8a9f-0c62ebe1896b" (UID: "0a63fe06-d57d-47a3-8a9f-0c62ebe1896b"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:17:22 crc kubenswrapper[4783]: I1002 11:17:22.515615 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0a63fe06-d57d-47a3-8a9f-0c62ebe1896b-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "0a63fe06-d57d-47a3-8a9f-0c62ebe1896b" (UID: "0a63fe06-d57d-47a3-8a9f-0c62ebe1896b"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:17:22 crc kubenswrapper[4783]: I1002 11:17:22.524655 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a63fe06-d57d-47a3-8a9f-0c62ebe1896b-scripts" (OuterVolumeSpecName: "scripts") pod "0a63fe06-d57d-47a3-8a9f-0c62ebe1896b" (UID: "0a63fe06-d57d-47a3-8a9f-0c62ebe1896b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:17:22 crc kubenswrapper[4783]: I1002 11:17:22.528618 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a63fe06-d57d-47a3-8a9f-0c62ebe1896b-kube-api-access-twtzb" (OuterVolumeSpecName: "kube-api-access-twtzb") pod "0a63fe06-d57d-47a3-8a9f-0c62ebe1896b" (UID: "0a63fe06-d57d-47a3-8a9f-0c62ebe1896b"). InnerVolumeSpecName "kube-api-access-twtzb". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:17:22 crc kubenswrapper[4783]: I1002 11:17:22.622592 4783 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0a63fe06-d57d-47a3-8a9f-0c62ebe1896b-log-httpd\") on node \"crc\" DevicePath \"\"" Oct 02 11:17:22 crc kubenswrapper[4783]: I1002 11:17:22.622620 4783 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0a63fe06-d57d-47a3-8a9f-0c62ebe1896b-scripts\") on node \"crc\" DevicePath \"\"" Oct 02 11:17:22 crc kubenswrapper[4783]: I1002 11:17:22.622628 4783 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/0a63fe06-d57d-47a3-8a9f-0c62ebe1896b-run-httpd\") on node \"crc\" DevicePath \"\"" Oct 02 11:17:22 crc kubenswrapper[4783]: I1002 11:17:22.622636 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-twtzb\" (UniqueName: \"kubernetes.io/projected/0a63fe06-d57d-47a3-8a9f-0c62ebe1896b-kube-api-access-twtzb\") on node \"crc\" DevicePath \"\"" Oct 02 11:17:22 crc kubenswrapper[4783]: I1002 11:17:22.658063 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a63fe06-d57d-47a3-8a9f-0c62ebe1896b-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "0a63fe06-d57d-47a3-8a9f-0c62ebe1896b" (UID: "0a63fe06-d57d-47a3-8a9f-0c62ebe1896b"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:17:22 crc kubenswrapper[4783]: I1002 11:17:22.673950 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a63fe06-d57d-47a3-8a9f-0c62ebe1896b-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "0a63fe06-d57d-47a3-8a9f-0c62ebe1896b" (UID: "0a63fe06-d57d-47a3-8a9f-0c62ebe1896b"). InnerVolumeSpecName "ceilometer-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:17:22 crc kubenswrapper[4783]: I1002 11:17:22.711215 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a63fe06-d57d-47a3-8a9f-0c62ebe1896b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0a63fe06-d57d-47a3-8a9f-0c62ebe1896b" (UID: "0a63fe06-d57d-47a3-8a9f-0c62ebe1896b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:17:22 crc kubenswrapper[4783]: I1002 11:17:22.724562 4783 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/0a63fe06-d57d-47a3-8a9f-0c62ebe1896b-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Oct 02 11:17:22 crc kubenswrapper[4783]: I1002 11:17:22.724608 4783 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0a63fe06-d57d-47a3-8a9f-0c62ebe1896b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 11:17:22 crc kubenswrapper[4783]: I1002 11:17:22.724622 4783 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/0a63fe06-d57d-47a3-8a9f-0c62ebe1896b-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 02 11:17:22 crc kubenswrapper[4783]: I1002 11:17:22.750659 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a63fe06-d57d-47a3-8a9f-0c62ebe1896b-config-data" (OuterVolumeSpecName: "config-data") pod "0a63fe06-d57d-47a3-8a9f-0c62ebe1896b" (UID: "0a63fe06-d57d-47a3-8a9f-0c62ebe1896b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:17:22 crc kubenswrapper[4783]: I1002 11:17:22.826576 4783 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0a63fe06-d57d-47a3-8a9f-0c62ebe1896b-config-data\") on node \"crc\" DevicePath \"\"" Oct 02 11:17:23 crc kubenswrapper[4783]: I1002 11:17:23.202360 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Oct 02 11:17:23 crc kubenswrapper[4783]: I1002 11:17:23.202364 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"0a63fe06-d57d-47a3-8a9f-0c62ebe1896b","Type":"ContainerDied","Data":"bbcc133e16ebdb9095795c909f85a3d698b1a6921b154700d3be3d4800618ec8"} Oct 02 11:17:23 crc kubenswrapper[4783]: I1002 11:17:23.202834 4783 scope.go:117] "RemoveContainer" containerID="1e428061ee909d2958920fbc72160ad7b89baf4dc5e6efb59f844a7844952e66" Oct 02 11:17:23 crc kubenswrapper[4783]: I1002 11:17:23.204482 4783 generic.go:334] "Generic (PLEG): container finished" podID="733b95bc-bf98-4482-ade1-ebabbbcc6cf1" containerID="4ad8febb601b117dfc3849ff33e5657b98437c1c009104d251e79fef894a37ba" exitCode=0 Oct 02 11:17:23 crc kubenswrapper[4783]: I1002 11:17:23.204571 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6rdg6" event={"ID":"733b95bc-bf98-4482-ade1-ebabbbcc6cf1","Type":"ContainerDied","Data":"4ad8febb601b117dfc3849ff33e5657b98437c1c009104d251e79fef894a37ba"} Oct 02 11:17:23 crc kubenswrapper[4783]: I1002 11:17:23.205995 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-tlclg" event={"ID":"fba98d7a-3a8d-43dd-b494-a7769328b96e","Type":"ContainerStarted","Data":"3250caf9baa85febd54656b6bb83aff29d6e1374aaf7417fe3b9711ee8687cce"} Oct 02 11:17:23 crc kubenswrapper[4783]: I1002 11:17:23.243379 4783 scope.go:117] "RemoveContainer" containerID="c213eff031f1ffc4af6844723bba4c1936760673c986f3fadc3c309efce7eb73" Oct 02 11:17:23 crc kubenswrapper[4783]: I1002 11:17:23.248772 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-tlclg" podStartSLOduration=2.4767286410000002 podStartE2EDuration="13.248758521s" podCreationTimestamp="2025-10-02 11:17:10 +0000 UTC" firstStartedPulling="2025-10-02 11:17:11.39153273 +0000 UTC m=+1464.707726991" lastFinishedPulling="2025-10-02 11:17:22.16356261 +0000 UTC m=+1475.479756871" observedRunningTime="2025-10-02 11:17:23.243997181 +0000 UTC m=+1476.560191442" watchObservedRunningTime="2025-10-02 11:17:23.248758521 +0000 UTC m=+1476.564952782" Oct 02 11:17:23 crc kubenswrapper[4783]: I1002 11:17:23.299253 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 02 11:17:23 crc kubenswrapper[4783]: I1002 11:17:23.301567 4783 scope.go:117] "RemoveContainer" containerID="282ba2977870ff8257593736694a70fcb76c80546acda0fc5c31e21fc1f9546d" Oct 02 11:17:23 crc kubenswrapper[4783]: I1002 11:17:23.306429 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Oct 02 11:17:23 crc kubenswrapper[4783]: I1002 11:17:23.334645 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Oct 02 11:17:23 crc kubenswrapper[4783]: E1002 11:17:23.335010 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a63fe06-d57d-47a3-8a9f-0c62ebe1896b" containerName="sg-core" Oct 02 11:17:23 crc kubenswrapper[4783]: I1002 11:17:23.335025 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a63fe06-d57d-47a3-8a9f-0c62ebe1896b" containerName="sg-core" Oct 02 11:17:23 crc kubenswrapper[4783]: E1002 11:17:23.335051 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a63fe06-d57d-47a3-8a9f-0c62ebe1896b" containerName="ceilometer-central-agent" Oct 02 11:17:23 crc kubenswrapper[4783]: I1002 11:17:23.335058 4783 state_mem.go:107] "Deleted 
CPUSet assignment" podUID="0a63fe06-d57d-47a3-8a9f-0c62ebe1896b" containerName="ceilometer-central-agent" Oct 02 11:17:23 crc kubenswrapper[4783]: E1002 11:17:23.335074 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a63fe06-d57d-47a3-8a9f-0c62ebe1896b" containerName="ceilometer-notification-agent" Oct 02 11:17:23 crc kubenswrapper[4783]: I1002 11:17:23.335080 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a63fe06-d57d-47a3-8a9f-0c62ebe1896b" containerName="ceilometer-notification-agent" Oct 02 11:17:23 crc kubenswrapper[4783]: E1002 11:17:23.335091 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a63fe06-d57d-47a3-8a9f-0c62ebe1896b" containerName="proxy-httpd" Oct 02 11:17:23 crc kubenswrapper[4783]: I1002 11:17:23.335097 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a63fe06-d57d-47a3-8a9f-0c62ebe1896b" containerName="proxy-httpd" Oct 02 11:17:23 crc kubenswrapper[4783]: I1002 11:17:23.335254 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a63fe06-d57d-47a3-8a9f-0c62ebe1896b" containerName="sg-core" Oct 02 11:17:23 crc kubenswrapper[4783]: I1002 11:17:23.335272 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a63fe06-d57d-47a3-8a9f-0c62ebe1896b" containerName="ceilometer-central-agent" Oct 02 11:17:23 crc kubenswrapper[4783]: I1002 11:17:23.335284 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a63fe06-d57d-47a3-8a9f-0c62ebe1896b" containerName="proxy-httpd" Oct 02 11:17:23 crc kubenswrapper[4783]: I1002 11:17:23.335296 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a63fe06-d57d-47a3-8a9f-0c62ebe1896b" containerName="ceilometer-notification-agent" Oct 02 11:17:23 crc kubenswrapper[4783]: I1002 11:17:23.341588 4783 scope.go:117] "RemoveContainer" containerID="9e9d84bbf32e7403dae90d6f270bd8a96354e185288afc8dc819ec916bb2853a" Oct 02 11:17:23 crc kubenswrapper[4783]: I1002 11:17:23.344134 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Oct 02 11:17:23 crc kubenswrapper[4783]: I1002 11:17:23.366319 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Oct 02 11:17:23 crc kubenswrapper[4783]: I1002 11:17:23.366525 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Oct 02 11:17:23 crc kubenswrapper[4783]: I1002 11:17:23.366688 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Oct 02 11:17:23 crc kubenswrapper[4783]: I1002 11:17:23.376669 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 02 11:17:23 crc kubenswrapper[4783]: I1002 11:17:23.465461 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cbdb97bd-c226-45ae-943c-04c8530c6df7-scripts\") pod \"ceilometer-0\" (UID: \"cbdb97bd-c226-45ae-943c-04c8530c6df7\") " pod="openstack/ceilometer-0" Oct 02 11:17:23 crc kubenswrapper[4783]: I1002 11:17:23.465528 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cbdb97bd-c226-45ae-943c-04c8530c6df7-log-httpd\") pod \"ceilometer-0\" (UID: \"cbdb97bd-c226-45ae-943c-04c8530c6df7\") " pod="openstack/ceilometer-0" Oct 02 11:17:23 crc kubenswrapper[4783]: I1002 11:17:23.465561 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cbdb97bd-c226-45ae-943c-04c8530c6df7-run-httpd\") pod \"ceilometer-0\" (UID: \"cbdb97bd-c226-45ae-943c-04c8530c6df7\") " pod="openstack/ceilometer-0" Oct 02 11:17:23 crc kubenswrapper[4783]: I1002 11:17:23.465638 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cbdb97bd-c226-45ae-943c-04c8530c6df7-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"cbdb97bd-c226-45ae-943c-04c8530c6df7\") " pod="openstack/ceilometer-0" Oct 02 11:17:23 crc kubenswrapper[4783]: I1002 11:17:23.465669 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cbdb97bd-c226-45ae-943c-04c8530c6df7-config-data\") pod \"ceilometer-0\" (UID: \"cbdb97bd-c226-45ae-943c-04c8530c6df7\") " pod="openstack/ceilometer-0" Oct 02 11:17:23 crc kubenswrapper[4783]: I1002 11:17:23.465701 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/cbdb97bd-c226-45ae-943c-04c8530c6df7-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"cbdb97bd-c226-45ae-943c-04c8530c6df7\") " pod="openstack/ceilometer-0" Oct 02 11:17:23 crc kubenswrapper[4783]: I1002 11:17:23.465727 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wpsfd\" (UniqueName: \"kubernetes.io/projected/cbdb97bd-c226-45ae-943c-04c8530c6df7-kube-api-access-wpsfd\") pod \"ceilometer-0\" (UID: \"cbdb97bd-c226-45ae-943c-04c8530c6df7\") " pod="openstack/ceilometer-0" Oct 02 11:17:23 crc kubenswrapper[4783]: I1002 11:17:23.465744 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/cbdb97bd-c226-45ae-943c-04c8530c6df7-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"cbdb97bd-c226-45ae-943c-04c8530c6df7\") " pod="openstack/ceilometer-0" Oct 02 11:17:23 crc kubenswrapper[4783]: I1002 11:17:23.555804 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0a63fe06-d57d-47a3-8a9f-0c62ebe1896b" path="/var/lib/kubelet/pods/0a63fe06-d57d-47a3-8a9f-0c62ebe1896b/volumes" Oct 02 11:17:23 crc kubenswrapper[4783]: I1002 11:17:23.567398 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cbdb97bd-c226-45ae-943c-04c8530c6df7-log-httpd\") pod \"ceilometer-0\" (UID: \"cbdb97bd-c226-45ae-943c-04c8530c6df7\") " pod="openstack/ceilometer-0" Oct 02 11:17:23 crc kubenswrapper[4783]: I1002 11:17:23.567646 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cbdb97bd-c226-45ae-943c-04c8530c6df7-run-httpd\") pod \"ceilometer-0\" (UID: \"cbdb97bd-c226-45ae-943c-04c8530c6df7\") " pod="openstack/ceilometer-0" Oct 02 11:17:23 crc kubenswrapper[4783]: I1002 11:17:23.567860 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cbdb97bd-c226-45ae-943c-04c8530c6df7-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"cbdb97bd-c226-45ae-943c-04c8530c6df7\") " pod="openstack/ceilometer-0" Oct 02 11:17:23 crc kubenswrapper[4783]: I1002 11:17:23.568002 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cbdb97bd-c226-45ae-943c-04c8530c6df7-config-data\") pod \"ceilometer-0\" (UID: \"cbdb97bd-c226-45ae-943c-04c8530c6df7\") " pod="openstack/ceilometer-0" Oct 02 11:17:23 crc kubenswrapper[4783]: I1002 11:17:23.568134 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/cbdb97bd-c226-45ae-943c-04c8530c6df7-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"cbdb97bd-c226-45ae-943c-04c8530c6df7\") " pod="openstack/ceilometer-0" Oct 02 11:17:23 crc kubenswrapper[4783]: I1002 11:17:23.568258 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wpsfd\" (UniqueName: \"kubernetes.io/projected/cbdb97bd-c226-45ae-943c-04c8530c6df7-kube-api-access-wpsfd\") pod \"ceilometer-0\" (UID: \"cbdb97bd-c226-45ae-943c-04c8530c6df7\") " pod="openstack/ceilometer-0" Oct 02 11:17:23 crc kubenswrapper[4783]: I1002 11:17:23.568357 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/cbdb97bd-c226-45ae-943c-04c8530c6df7-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"cbdb97bd-c226-45ae-943c-04c8530c6df7\") " pod="openstack/ceilometer-0" Oct 02 11:17:23 crc kubenswrapper[4783]: I1002 11:17:23.568552 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cbdb97bd-c226-45ae-943c-04c8530c6df7-scripts\") pod \"ceilometer-0\" (UID: \"cbdb97bd-c226-45ae-943c-04c8530c6df7\") " pod="openstack/ceilometer-0" Oct 02 11:17:23 crc kubenswrapper[4783]: I1002 11:17:23.568669 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cbdb97bd-c226-45ae-943c-04c8530c6df7-run-httpd\") pod \"ceilometer-0\" (UID: 
\"cbdb97bd-c226-45ae-943c-04c8530c6df7\") " pod="openstack/ceilometer-0" Oct 02 11:17:23 crc kubenswrapper[4783]: I1002 11:17:23.567887 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cbdb97bd-c226-45ae-943c-04c8530c6df7-log-httpd\") pod \"ceilometer-0\" (UID: \"cbdb97bd-c226-45ae-943c-04c8530c6df7\") " pod="openstack/ceilometer-0" Oct 02 11:17:23 crc kubenswrapper[4783]: I1002 11:17:23.574051 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cbdb97bd-c226-45ae-943c-04c8530c6df7-config-data\") pod \"ceilometer-0\" (UID: \"cbdb97bd-c226-45ae-943c-04c8530c6df7\") " pod="openstack/ceilometer-0" Oct 02 11:17:23 crc kubenswrapper[4783]: I1002 11:17:23.574303 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/cbdb97bd-c226-45ae-943c-04c8530c6df7-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"cbdb97bd-c226-45ae-943c-04c8530c6df7\") " pod="openstack/ceilometer-0" Oct 02 11:17:23 crc kubenswrapper[4783]: I1002 11:17:23.574703 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cbdb97bd-c226-45ae-943c-04c8530c6df7-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"cbdb97bd-c226-45ae-943c-04c8530c6df7\") " pod="openstack/ceilometer-0" Oct 02 11:17:23 crc kubenswrapper[4783]: I1002 11:17:23.583045 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cbdb97bd-c226-45ae-943c-04c8530c6df7-scripts\") pod \"ceilometer-0\" (UID: \"cbdb97bd-c226-45ae-943c-04c8530c6df7\") " pod="openstack/ceilometer-0" Oct 02 11:17:23 crc kubenswrapper[4783]: I1002 11:17:23.583599 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/cbdb97bd-c226-45ae-943c-04c8530c6df7-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"cbdb97bd-c226-45ae-943c-04c8530c6df7\") " pod="openstack/ceilometer-0" Oct 02 11:17:23 crc kubenswrapper[4783]: I1002 11:17:23.588871 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wpsfd\" (UniqueName: \"kubernetes.io/projected/cbdb97bd-c226-45ae-943c-04c8530c6df7-kube-api-access-wpsfd\") pod \"ceilometer-0\" (UID: \"cbdb97bd-c226-45ae-943c-04c8530c6df7\") " pod="openstack/ceilometer-0" Oct 02 11:17:23 crc kubenswrapper[4783]: I1002 11:17:23.705774 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Oct 02 11:17:24 crc kubenswrapper[4783]: I1002 11:17:24.195629 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 02 11:17:24 crc kubenswrapper[4783]: W1002 11:17:24.198826 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podcbdb97bd_c226_45ae_943c_04c8530c6df7.slice/crio-d796b59aeddd54b047d26608f99fa4522059abf8ec5c7d14010ed88d9f70669d WatchSource:0}: Error finding container d796b59aeddd54b047d26608f99fa4522059abf8ec5c7d14010ed88d9f70669d: Status 404 returned error can't find the container with id d796b59aeddd54b047d26608f99fa4522059abf8ec5c7d14010ed88d9f70669d Oct 02 11:17:24 crc kubenswrapper[4783]: I1002 11:17:24.217783 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cbdb97bd-c226-45ae-943c-04c8530c6df7","Type":"ContainerStarted","Data":"d796b59aeddd54b047d26608f99fa4522059abf8ec5c7d14010ed88d9f70669d"} Oct 02 11:17:25 crc kubenswrapper[4783]: I1002 11:17:25.226815 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6rdg6" event={"ID":"733b95bc-bf98-4482-ade1-ebabbbcc6cf1","Type":"ContainerStarted","Data":"cbf4f9f9c22576c8df0243ce135970577608eee3b03585310938fc7a50553648"} Oct 02 11:17:27 crc kubenswrapper[4783]: I1002 11:17:27.243640 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cbdb97bd-c226-45ae-943c-04c8530c6df7","Type":"ContainerStarted","Data":"8bbebfad567d89c00f9f3676e1c85972121d186b70d32723c247a278546fa121"} Oct 02 11:17:27 crc kubenswrapper[4783]: I1002 11:17:27.984615 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-5fcdf587dd-wvthh" podUID="8abc2e1e-de94-4880-8a75-0c7ee0a2cdba" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.143:8443/dashboard/auth/login/?next=/dashboard/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 02 11:17:27 crc kubenswrapper[4783]: I1002 11:17:27.984697 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-5fcdf587dd-wvthh" Oct 02 11:17:27 crc kubenswrapper[4783]: I1002 11:17:27.985445 4783 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="horizon" containerStatusID={"Type":"cri-o","ID":"c82809b9c6eba0a8170866eb1d26bf56ad2341d1b01f79a18fbfd51af89b92ce"} pod="openstack/horizon-5fcdf587dd-wvthh" containerMessage="Container horizon failed startup probe, will be restarted" Oct 02 11:17:27 crc kubenswrapper[4783]: I1002 11:17:27.985484 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-5fcdf587dd-wvthh" podUID="8abc2e1e-de94-4880-8a75-0c7ee0a2cdba" containerName="horizon" containerID="cri-o://c82809b9c6eba0a8170866eb1d26bf56ad2341d1b01f79a18fbfd51af89b92ce" gracePeriod=30 Oct 02 11:17:28 crc kubenswrapper[4783]: I1002 11:17:28.116707 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-567b57d86d-gv6fq" podUID="48c11fb6-76f0-4028-a76f-6f67904bf3aa" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.144:8443/dashboard/auth/login/?next=/dashboard/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 02 11:17:28 crc kubenswrapper[4783]: I1002 11:17:28.117068 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openstack/horizon-567b57d86d-gv6fq" Oct 02 11:17:28 crc kubenswrapper[4783]: I1002 11:17:28.118519 4783 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="horizon" containerStatusID={"Type":"cri-o","ID":"76e35b57de4991d61267b7cf5435417f916259cc2da6cb259292b12054c98ddd"} pod="openstack/horizon-567b57d86d-gv6fq" containerMessage="Container horizon failed startup probe, will be restarted" Oct 02 11:17:28 crc kubenswrapper[4783]: I1002 11:17:28.118692 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-567b57d86d-gv6fq" podUID="48c11fb6-76f0-4028-a76f-6f67904bf3aa" containerName="horizon" containerID="cri-o://76e35b57de4991d61267b7cf5435417f916259cc2da6cb259292b12054c98ddd" gracePeriod=30 Oct 02 11:17:33 crc kubenswrapper[4783]: I1002 11:17:33.304382 4783 generic.go:334] "Generic (PLEG): container finished" podID="733b95bc-bf98-4482-ade1-ebabbbcc6cf1" containerID="cbf4f9f9c22576c8df0243ce135970577608eee3b03585310938fc7a50553648" exitCode=0 Oct 02 11:17:33 crc kubenswrapper[4783]: I1002 11:17:33.304468 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6rdg6" event={"ID":"733b95bc-bf98-4482-ade1-ebabbbcc6cf1","Type":"ContainerDied","Data":"cbf4f9f9c22576c8df0243ce135970577608eee3b03585310938fc7a50553648"} Oct 02 11:17:33 crc kubenswrapper[4783]: I1002 11:17:33.307016 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cbdb97bd-c226-45ae-943c-04c8530c6df7","Type":"ContainerStarted","Data":"28fa8af072b1f7d22cf969668e74f0dfb9dff62c4d9029c6157e411f3442d715"} Oct 02 11:17:34 crc kubenswrapper[4783]: I1002 11:17:34.319300 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cbdb97bd-c226-45ae-943c-04c8530c6df7","Type":"ContainerStarted","Data":"21a8e4d205753ec7a4e72db7455eb1291dc78ada382061fd5846bd39687a9d09"} Oct 02 11:17:34 crc kubenswrapper[4783]: I1002 11:17:34.322095 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6rdg6" event={"ID":"733b95bc-bf98-4482-ade1-ebabbbcc6cf1","Type":"ContainerStarted","Data":"44d6974ef3e79ed30b051e5d6967274c2abad5340395524954dd6fad63c735ba"} Oct 02 11:17:34 crc kubenswrapper[4783]: I1002 11:17:34.350004 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-6rdg6" podStartSLOduration=9.79129899 podStartE2EDuration="20.349984733s" podCreationTimestamp="2025-10-02 11:17:14 +0000 UTC" firstStartedPulling="2025-10-02 11:17:23.206927766 +0000 UTC m=+1476.523122027" lastFinishedPulling="2025-10-02 11:17:33.765613509 +0000 UTC m=+1487.081807770" observedRunningTime="2025-10-02 11:17:34.344111433 +0000 UTC m=+1487.660305714" watchObservedRunningTime="2025-10-02 11:17:34.349984733 +0000 UTC m=+1487.666178994" Oct 02 11:17:34 crc kubenswrapper[4783]: I1002 11:17:34.757813 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-6rdg6" Oct 02 11:17:34 crc kubenswrapper[4783]: I1002 11:17:34.758276 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-6rdg6" Oct 02 11:17:35 crc kubenswrapper[4783]: I1002 11:17:35.334372 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"cbdb97bd-c226-45ae-943c-04c8530c6df7","Type":"ContainerStarted","Data":"2ab006276d64b11347c490641d4ac97d0ea3884801ab5dd5c13fed327a33c761"} Oct 02 11:17:35 crc kubenswrapper[4783]: I1002 11:17:35.356854 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.696869596 podStartE2EDuration="12.356827571s" podCreationTimestamp="2025-10-02 11:17:23 +0000 UTC" firstStartedPulling="2025-10-02 11:17:24.222442311 +0000 UTC m=+1477.538636572" lastFinishedPulling="2025-10-02 11:17:34.882400286 +0000 UTC m=+1488.198594547" observedRunningTime="2025-10-02 11:17:35.351047473 +0000 UTC m=+1488.667241734" watchObservedRunningTime="2025-10-02 11:17:35.356827571 +0000 UTC m=+1488.673021832" Oct 02 11:17:35 crc kubenswrapper[4783]: I1002 11:17:35.807844 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-6rdg6" podUID="733b95bc-bf98-4482-ade1-ebabbbcc6cf1" containerName="registry-server" probeResult="failure" output=< Oct 02 11:17:35 crc kubenswrapper[4783]: timeout: failed to connect service ":50051" within 1s Oct 02 11:17:35 crc kubenswrapper[4783]: > Oct 02 11:17:36 crc kubenswrapper[4783]: I1002 11:17:36.342445 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Oct 02 11:17:37 crc kubenswrapper[4783]: I1002 11:17:37.361709 4783 generic.go:334] "Generic (PLEG): container finished" podID="8abc2e1e-de94-4880-8a75-0c7ee0a2cdba" containerID="c82809b9c6eba0a8170866eb1d26bf56ad2341d1b01f79a18fbfd51af89b92ce" exitCode=0 Oct 02 11:17:37 crc kubenswrapper[4783]: I1002 11:17:37.362117 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5fcdf587dd-wvthh" event={"ID":"8abc2e1e-de94-4880-8a75-0c7ee0a2cdba","Type":"ContainerDied","Data":"c82809b9c6eba0a8170866eb1d26bf56ad2341d1b01f79a18fbfd51af89b92ce"} Oct 02 11:17:37 crc kubenswrapper[4783]: I1002 11:17:37.362144 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5fcdf587dd-wvthh" event={"ID":"8abc2e1e-de94-4880-8a75-0c7ee0a2cdba","Type":"ContainerStarted","Data":"ab15a3cd7992b45c329572700c99718692ab18197b5fb99e472dd12d4fcae8ec"} Oct 02 11:17:37 crc kubenswrapper[4783]: I1002 11:17:37.362160 4783 scope.go:117] "RemoveContainer" containerID="9a79bf128cd748cac66115e7e4373946c6413ab941b85a301fa5e56d361363ac" Oct 02 11:17:37 crc kubenswrapper[4783]: I1002 11:17:37.370080 4783 generic.go:334] "Generic (PLEG): container finished" podID="48c11fb6-76f0-4028-a76f-6f67904bf3aa" containerID="76e35b57de4991d61267b7cf5435417f916259cc2da6cb259292b12054c98ddd" exitCode=0 Oct 02 11:17:37 crc kubenswrapper[4783]: I1002 11:17:37.370214 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-567b57d86d-gv6fq" event={"ID":"48c11fb6-76f0-4028-a76f-6f67904bf3aa","Type":"ContainerDied","Data":"76e35b57de4991d61267b7cf5435417f916259cc2da6cb259292b12054c98ddd"} Oct 02 11:17:37 crc kubenswrapper[4783]: I1002 11:17:37.370312 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-567b57d86d-gv6fq" event={"ID":"48c11fb6-76f0-4028-a76f-6f67904bf3aa","Type":"ContainerStarted","Data":"9751cd629aa641cbbbaf0ff8a895d0a750fa4f48a87b7f82350c646df51cb988"} Oct 02 11:17:37 crc kubenswrapper[4783]: I1002 11:17:37.582708 4783 scope.go:117] "RemoveContainer" containerID="eaaf5ac1ab8de4cd11862bbd9387b781b9c059890b204852bc3fb6e13f34a239" Oct 02 11:17:42 crc kubenswrapper[4783]: I1002 11:17:42.980592 4783 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="unhealthy" pod="openstack/horizon-5fcdf587dd-wvthh" Oct 02 11:17:42 crc kubenswrapper[4783]: I1002 11:17:42.981160 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-5fcdf587dd-wvthh" Oct 02 11:17:43 crc kubenswrapper[4783]: I1002 11:17:43.113816 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/horizon-567b57d86d-gv6fq" Oct 02 11:17:43 crc kubenswrapper[4783]: I1002 11:17:43.113880 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-567b57d86d-gv6fq" Oct 02 11:17:45 crc kubenswrapper[4783]: I1002 11:17:45.813647 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-6rdg6" podUID="733b95bc-bf98-4482-ade1-ebabbbcc6cf1" containerName="registry-server" probeResult="failure" output=< Oct 02 11:17:45 crc kubenswrapper[4783]: timeout: failed to connect service ":50051" within 1s Oct 02 11:17:45 crc kubenswrapper[4783]: > Oct 02 11:17:47 crc kubenswrapper[4783]: I1002 11:17:47.478821 4783 generic.go:334] "Generic (PLEG): container finished" podID="fba98d7a-3a8d-43dd-b494-a7769328b96e" containerID="3250caf9baa85febd54656b6bb83aff29d6e1374aaf7417fe3b9711ee8687cce" exitCode=0 Oct 02 11:17:47 crc kubenswrapper[4783]: I1002 11:17:47.478928 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-tlclg" event={"ID":"fba98d7a-3a8d-43dd-b494-a7769328b96e","Type":"ContainerDied","Data":"3250caf9baa85febd54656b6bb83aff29d6e1374aaf7417fe3b9711ee8687cce"} Oct 02 11:17:48 crc kubenswrapper[4783]: I1002 11:17:48.811669 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-tlclg" Oct 02 11:17:48 crc kubenswrapper[4783]: I1002 11:17:48.864233 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fba98d7a-3a8d-43dd-b494-a7769328b96e-config-data\") pod \"fba98d7a-3a8d-43dd-b494-a7769328b96e\" (UID: \"fba98d7a-3a8d-43dd-b494-a7769328b96e\") " Oct 02 11:17:48 crc kubenswrapper[4783]: I1002 11:17:48.864321 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fba98d7a-3a8d-43dd-b494-a7769328b96e-combined-ca-bundle\") pod \"fba98d7a-3a8d-43dd-b494-a7769328b96e\" (UID: \"fba98d7a-3a8d-43dd-b494-a7769328b96e\") " Oct 02 11:17:48 crc kubenswrapper[4783]: I1002 11:17:48.864357 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fba98d7a-3a8d-43dd-b494-a7769328b96e-scripts\") pod \"fba98d7a-3a8d-43dd-b494-a7769328b96e\" (UID: \"fba98d7a-3a8d-43dd-b494-a7769328b96e\") " Oct 02 11:17:48 crc kubenswrapper[4783]: I1002 11:17:48.864541 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bgczz\" (UniqueName: \"kubernetes.io/projected/fba98d7a-3a8d-43dd-b494-a7769328b96e-kube-api-access-bgczz\") pod \"fba98d7a-3a8d-43dd-b494-a7769328b96e\" (UID: \"fba98d7a-3a8d-43dd-b494-a7769328b96e\") " Oct 02 11:17:48 crc kubenswrapper[4783]: I1002 11:17:48.890645 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fba98d7a-3a8d-43dd-b494-a7769328b96e-kube-api-access-bgczz" (OuterVolumeSpecName: "kube-api-access-bgczz") pod "fba98d7a-3a8d-43dd-b494-a7769328b96e" (UID: 
"fba98d7a-3a8d-43dd-b494-a7769328b96e"). InnerVolumeSpecName "kube-api-access-bgczz". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:17:48 crc kubenswrapper[4783]: I1002 11:17:48.891152 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fba98d7a-3a8d-43dd-b494-a7769328b96e-scripts" (OuterVolumeSpecName: "scripts") pod "fba98d7a-3a8d-43dd-b494-a7769328b96e" (UID: "fba98d7a-3a8d-43dd-b494-a7769328b96e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:17:48 crc kubenswrapper[4783]: I1002 11:17:48.899633 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fba98d7a-3a8d-43dd-b494-a7769328b96e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fba98d7a-3a8d-43dd-b494-a7769328b96e" (UID: "fba98d7a-3a8d-43dd-b494-a7769328b96e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:17:48 crc kubenswrapper[4783]: I1002 11:17:48.929531 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fba98d7a-3a8d-43dd-b494-a7769328b96e-config-data" (OuterVolumeSpecName: "config-data") pod "fba98d7a-3a8d-43dd-b494-a7769328b96e" (UID: "fba98d7a-3a8d-43dd-b494-a7769328b96e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:17:48 crc kubenswrapper[4783]: I1002 11:17:48.967607 4783 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fba98d7a-3a8d-43dd-b494-a7769328b96e-config-data\") on node \"crc\" DevicePath \"\"" Oct 02 11:17:48 crc kubenswrapper[4783]: I1002 11:17:48.967868 4783 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fba98d7a-3a8d-43dd-b494-a7769328b96e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 11:17:48 crc kubenswrapper[4783]: I1002 11:17:48.967970 4783 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fba98d7a-3a8d-43dd-b494-a7769328b96e-scripts\") on node \"crc\" DevicePath \"\"" Oct 02 11:17:48 crc kubenswrapper[4783]: I1002 11:17:48.968114 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bgczz\" (UniqueName: \"kubernetes.io/projected/fba98d7a-3a8d-43dd-b494-a7769328b96e-kube-api-access-bgczz\") on node \"crc\" DevicePath \"\"" Oct 02 11:17:49 crc kubenswrapper[4783]: I1002 11:17:49.500561 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-tlclg" event={"ID":"fba98d7a-3a8d-43dd-b494-a7769328b96e","Type":"ContainerDied","Data":"c776567d959822059333cc0b9305fa014f36511fdb927830896570bb1ab00341"} Oct 02 11:17:49 crc kubenswrapper[4783]: I1002 11:17:49.500886 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c776567d959822059333cc0b9305fa014f36511fdb927830896570bb1ab00341" Oct 02 11:17:49 crc kubenswrapper[4783]: I1002 11:17:49.500640 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-tlclg" Oct 02 11:17:49 crc kubenswrapper[4783]: I1002 11:17:49.637201 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Oct 02 11:17:49 crc kubenswrapper[4783]: E1002 11:17:49.638496 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fba98d7a-3a8d-43dd-b494-a7769328b96e" containerName="nova-cell0-conductor-db-sync" Oct 02 11:17:49 crc kubenswrapper[4783]: I1002 11:17:49.638613 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="fba98d7a-3a8d-43dd-b494-a7769328b96e" containerName="nova-cell0-conductor-db-sync" Oct 02 11:17:49 crc kubenswrapper[4783]: I1002 11:17:49.638991 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="fba98d7a-3a8d-43dd-b494-a7769328b96e" containerName="nova-cell0-conductor-db-sync" Oct 02 11:17:49 crc kubenswrapper[4783]: I1002 11:17:49.639842 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Oct 02 11:17:49 crc kubenswrapper[4783]: I1002 11:17:49.647921 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Oct 02 11:17:49 crc kubenswrapper[4783]: I1002 11:17:49.647972 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-xmlzl" Oct 02 11:17:49 crc kubenswrapper[4783]: I1002 11:17:49.669057 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Oct 02 11:17:49 crc kubenswrapper[4783]: I1002 11:17:49.686645 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88323f1b-d9ae-4044-9a23-455194f06041-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"88323f1b-d9ae-4044-9a23-455194f06041\") " pod="openstack/nova-cell0-conductor-0" Oct 02 11:17:49 crc kubenswrapper[4783]: I1002 11:17:49.686689 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88323f1b-d9ae-4044-9a23-455194f06041-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"88323f1b-d9ae-4044-9a23-455194f06041\") " pod="openstack/nova-cell0-conductor-0" Oct 02 11:17:49 crc kubenswrapper[4783]: I1002 11:17:49.686784 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ndthx\" (UniqueName: \"kubernetes.io/projected/88323f1b-d9ae-4044-9a23-455194f06041-kube-api-access-ndthx\") pod \"nova-cell0-conductor-0\" (UID: \"88323f1b-d9ae-4044-9a23-455194f06041\") " pod="openstack/nova-cell0-conductor-0" Oct 02 11:17:49 crc kubenswrapper[4783]: I1002 11:17:49.787932 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88323f1b-d9ae-4044-9a23-455194f06041-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"88323f1b-d9ae-4044-9a23-455194f06041\") " pod="openstack/nova-cell0-conductor-0" Oct 02 11:17:49 crc kubenswrapper[4783]: I1002 11:17:49.787975 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88323f1b-d9ae-4044-9a23-455194f06041-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"88323f1b-d9ae-4044-9a23-455194f06041\") " pod="openstack/nova-cell0-conductor-0" Oct 02 11:17:49 crc kubenswrapper[4783]: I1002 
11:17:49.788022 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ndthx\" (UniqueName: \"kubernetes.io/projected/88323f1b-d9ae-4044-9a23-455194f06041-kube-api-access-ndthx\") pod \"nova-cell0-conductor-0\" (UID: \"88323f1b-d9ae-4044-9a23-455194f06041\") " pod="openstack/nova-cell0-conductor-0" Oct 02 11:17:49 crc kubenswrapper[4783]: I1002 11:17:49.792243 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/88323f1b-d9ae-4044-9a23-455194f06041-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"88323f1b-d9ae-4044-9a23-455194f06041\") " pod="openstack/nova-cell0-conductor-0" Oct 02 11:17:49 crc kubenswrapper[4783]: I1002 11:17:49.801188 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/88323f1b-d9ae-4044-9a23-455194f06041-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"88323f1b-d9ae-4044-9a23-455194f06041\") " pod="openstack/nova-cell0-conductor-0" Oct 02 11:17:49 crc kubenswrapper[4783]: I1002 11:17:49.809975 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ndthx\" (UniqueName: \"kubernetes.io/projected/88323f1b-d9ae-4044-9a23-455194f06041-kube-api-access-ndthx\") pod \"nova-cell0-conductor-0\" (UID: \"88323f1b-d9ae-4044-9a23-455194f06041\") " pod="openstack/nova-cell0-conductor-0" Oct 02 11:17:49 crc kubenswrapper[4783]: I1002 11:17:49.967313 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Oct 02 11:17:50 crc kubenswrapper[4783]: I1002 11:17:50.441528 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"] Oct 02 11:17:50 crc kubenswrapper[4783]: I1002 11:17:50.513117 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"88323f1b-d9ae-4044-9a23-455194f06041","Type":"ContainerStarted","Data":"5f5d85535298d013b484b5632b8031e3a724407302014d1ed825e95387583c17"} Oct 02 11:17:51 crc kubenswrapper[4783]: I1002 11:17:51.514442 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 02 11:17:51 crc kubenswrapper[4783]: I1002 11:17:51.514915 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 02 11:17:51 crc kubenswrapper[4783]: I1002 11:17:51.525529 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"88323f1b-d9ae-4044-9a23-455194f06041","Type":"ContainerStarted","Data":"6d0f62cf85957837aa017c846e2e5e961e16d61cd1da3290edb10da540f9c379"} Oct 02 11:17:51 crc kubenswrapper[4783]: I1002 11:17:51.526479 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0" Oct 02 11:17:51 crc kubenswrapper[4783]: I1002 11:17:51.586978 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.5869148600000003 
podStartE2EDuration="2.58691486s" podCreationTimestamp="2025-10-02 11:17:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:17:51.543699817 +0000 UTC m=+1504.859894078" watchObservedRunningTime="2025-10-02 11:17:51.58691486 +0000 UTC m=+1504.903109141" Oct 02 11:17:52 crc kubenswrapper[4783]: I1002 11:17:52.982536 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-5fcdf587dd-wvthh" podUID="8abc2e1e-de94-4880-8a75-0c7ee0a2cdba" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.143:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.143:8443: connect: connection refused" Oct 02 11:17:53 crc kubenswrapper[4783]: I1002 11:17:53.116598 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-567b57d86d-gv6fq" podUID="48c11fb6-76f0-4028-a76f-6f67904bf3aa" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.144:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.144:8443: connect: connection refused" Oct 02 11:17:53 crc kubenswrapper[4783]: I1002 11:17:53.725763 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Oct 02 11:17:54 crc kubenswrapper[4783]: I1002 11:17:54.802242 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-6rdg6" Oct 02 11:17:54 crc kubenswrapper[4783]: I1002 11:17:54.862869 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-6rdg6" Oct 02 11:17:55 crc kubenswrapper[4783]: I1002 11:17:55.054602 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-6rdg6"] Oct 02 11:17:56 crc kubenswrapper[4783]: I1002 11:17:56.576213 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-6rdg6" podUID="733b95bc-bf98-4482-ade1-ebabbbcc6cf1" containerName="registry-server" containerID="cri-o://44d6974ef3e79ed30b051e5d6967274c2abad5340395524954dd6fad63c735ba" gracePeriod=2 Oct 02 11:17:57 crc kubenswrapper[4783]: I1002 11:17:57.029670 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-6rdg6" Oct 02 11:17:57 crc kubenswrapper[4783]: I1002 11:17:57.131955 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/733b95bc-bf98-4482-ade1-ebabbbcc6cf1-utilities\") pod \"733b95bc-bf98-4482-ade1-ebabbbcc6cf1\" (UID: \"733b95bc-bf98-4482-ade1-ebabbbcc6cf1\") " Oct 02 11:17:57 crc kubenswrapper[4783]: I1002 11:17:57.132125 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bd5mj\" (UniqueName: \"kubernetes.io/projected/733b95bc-bf98-4482-ade1-ebabbbcc6cf1-kube-api-access-bd5mj\") pod \"733b95bc-bf98-4482-ade1-ebabbbcc6cf1\" (UID: \"733b95bc-bf98-4482-ade1-ebabbbcc6cf1\") " Oct 02 11:17:57 crc kubenswrapper[4783]: I1002 11:17:57.132163 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/733b95bc-bf98-4482-ade1-ebabbbcc6cf1-catalog-content\") pod \"733b95bc-bf98-4482-ade1-ebabbbcc6cf1\" (UID: \"733b95bc-bf98-4482-ade1-ebabbbcc6cf1\") " Oct 02 11:17:57 crc kubenswrapper[4783]: I1002 11:17:57.132602 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/733b95bc-bf98-4482-ade1-ebabbbcc6cf1-utilities" (OuterVolumeSpecName: "utilities") pod "733b95bc-bf98-4482-ade1-ebabbbcc6cf1" (UID: "733b95bc-bf98-4482-ade1-ebabbbcc6cf1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:17:57 crc kubenswrapper[4783]: I1002 11:17:57.147610 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/733b95bc-bf98-4482-ade1-ebabbbcc6cf1-kube-api-access-bd5mj" (OuterVolumeSpecName: "kube-api-access-bd5mj") pod "733b95bc-bf98-4482-ade1-ebabbbcc6cf1" (UID: "733b95bc-bf98-4482-ade1-ebabbbcc6cf1"). InnerVolumeSpecName "kube-api-access-bd5mj". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:17:57 crc kubenswrapper[4783]: I1002 11:17:57.165300 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/733b95bc-bf98-4482-ade1-ebabbbcc6cf1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "733b95bc-bf98-4482-ade1-ebabbbcc6cf1" (UID: "733b95bc-bf98-4482-ade1-ebabbbcc6cf1"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:17:57 crc kubenswrapper[4783]: I1002 11:17:57.234608 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bd5mj\" (UniqueName: \"kubernetes.io/projected/733b95bc-bf98-4482-ade1-ebabbbcc6cf1-kube-api-access-bd5mj\") on node \"crc\" DevicePath \"\"" Oct 02 11:17:57 crc kubenswrapper[4783]: I1002 11:17:57.234646 4783 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/733b95bc-bf98-4482-ade1-ebabbbcc6cf1-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 02 11:17:57 crc kubenswrapper[4783]: I1002 11:17:57.234656 4783 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/733b95bc-bf98-4482-ade1-ebabbbcc6cf1-utilities\") on node \"crc\" DevicePath \"\"" Oct 02 11:17:57 crc kubenswrapper[4783]: I1002 11:17:57.586853 4783 generic.go:334] "Generic (PLEG): container finished" podID="733b95bc-bf98-4482-ade1-ebabbbcc6cf1" containerID="44d6974ef3e79ed30b051e5d6967274c2abad5340395524954dd6fad63c735ba" exitCode=0 Oct 02 11:17:57 crc kubenswrapper[4783]: I1002 11:17:57.586910 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6rdg6" event={"ID":"733b95bc-bf98-4482-ade1-ebabbbcc6cf1","Type":"ContainerDied","Data":"44d6974ef3e79ed30b051e5d6967274c2abad5340395524954dd6fad63c735ba"} Oct 02 11:17:57 crc kubenswrapper[4783]: I1002 11:17:57.586944 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-6rdg6" event={"ID":"733b95bc-bf98-4482-ade1-ebabbbcc6cf1","Type":"ContainerDied","Data":"22376b770af2f1b2211cbbc7e2d9422aa4196003ce5e11e39aca58ebeb39a2b1"} Oct 02 11:17:57 crc kubenswrapper[4783]: I1002 11:17:57.586972 4783 scope.go:117] "RemoveContainer" containerID="44d6974ef3e79ed30b051e5d6967274c2abad5340395524954dd6fad63c735ba" Oct 02 11:17:57 crc kubenswrapper[4783]: I1002 11:17:57.587156 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-6rdg6" Oct 02 11:17:57 crc kubenswrapper[4783]: I1002 11:17:57.614134 4783 scope.go:117] "RemoveContainer" containerID="cbf4f9f9c22576c8df0243ce135970577608eee3b03585310938fc7a50553648" Oct 02 11:17:57 crc kubenswrapper[4783]: I1002 11:17:57.626521 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-6rdg6"] Oct 02 11:17:57 crc kubenswrapper[4783]: I1002 11:17:57.637212 4783 scope.go:117] "RemoveContainer" containerID="4ad8febb601b117dfc3849ff33e5657b98437c1c009104d251e79fef894a37ba" Oct 02 11:17:57 crc kubenswrapper[4783]: I1002 11:17:57.639275 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-6rdg6"] Oct 02 11:17:57 crc kubenswrapper[4783]: I1002 11:17:57.685561 4783 scope.go:117] "RemoveContainer" containerID="44d6974ef3e79ed30b051e5d6967274c2abad5340395524954dd6fad63c735ba" Oct 02 11:17:57 crc kubenswrapper[4783]: E1002 11:17:57.686221 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"44d6974ef3e79ed30b051e5d6967274c2abad5340395524954dd6fad63c735ba\": container with ID starting with 44d6974ef3e79ed30b051e5d6967274c2abad5340395524954dd6fad63c735ba not found: ID does not exist" containerID="44d6974ef3e79ed30b051e5d6967274c2abad5340395524954dd6fad63c735ba" Oct 02 11:17:57 crc kubenswrapper[4783]: I1002 11:17:57.686284 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"44d6974ef3e79ed30b051e5d6967274c2abad5340395524954dd6fad63c735ba"} err="failed to get container status \"44d6974ef3e79ed30b051e5d6967274c2abad5340395524954dd6fad63c735ba\": rpc error: code = NotFound desc = could not find container \"44d6974ef3e79ed30b051e5d6967274c2abad5340395524954dd6fad63c735ba\": container with ID starting with 44d6974ef3e79ed30b051e5d6967274c2abad5340395524954dd6fad63c735ba not found: ID does not exist" Oct 02 11:17:57 crc kubenswrapper[4783]: I1002 11:17:57.686322 4783 scope.go:117] "RemoveContainer" containerID="cbf4f9f9c22576c8df0243ce135970577608eee3b03585310938fc7a50553648" Oct 02 11:17:57 crc kubenswrapper[4783]: E1002 11:17:57.686667 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cbf4f9f9c22576c8df0243ce135970577608eee3b03585310938fc7a50553648\": container with ID starting with cbf4f9f9c22576c8df0243ce135970577608eee3b03585310938fc7a50553648 not found: ID does not exist" containerID="cbf4f9f9c22576c8df0243ce135970577608eee3b03585310938fc7a50553648" Oct 02 11:17:57 crc kubenswrapper[4783]: I1002 11:17:57.686715 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cbf4f9f9c22576c8df0243ce135970577608eee3b03585310938fc7a50553648"} err="failed to get container status \"cbf4f9f9c22576c8df0243ce135970577608eee3b03585310938fc7a50553648\": rpc error: code = NotFound desc = could not find container \"cbf4f9f9c22576c8df0243ce135970577608eee3b03585310938fc7a50553648\": container with ID starting with cbf4f9f9c22576c8df0243ce135970577608eee3b03585310938fc7a50553648 not found: ID does not exist" Oct 02 11:17:57 crc kubenswrapper[4783]: I1002 11:17:57.686736 4783 scope.go:117] "RemoveContainer" containerID="4ad8febb601b117dfc3849ff33e5657b98437c1c009104d251e79fef894a37ba" Oct 02 11:17:57 crc kubenswrapper[4783]: E1002 11:17:57.687132 4783 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"4ad8febb601b117dfc3849ff33e5657b98437c1c009104d251e79fef894a37ba\": container with ID starting with 4ad8febb601b117dfc3849ff33e5657b98437c1c009104d251e79fef894a37ba not found: ID does not exist" containerID="4ad8febb601b117dfc3849ff33e5657b98437c1c009104d251e79fef894a37ba" Oct 02 11:17:57 crc kubenswrapper[4783]: I1002 11:17:57.687171 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ad8febb601b117dfc3849ff33e5657b98437c1c009104d251e79fef894a37ba"} err="failed to get container status \"4ad8febb601b117dfc3849ff33e5657b98437c1c009104d251e79fef894a37ba\": rpc error: code = NotFound desc = could not find container \"4ad8febb601b117dfc3849ff33e5657b98437c1c009104d251e79fef894a37ba\": container with ID starting with 4ad8febb601b117dfc3849ff33e5657b98437c1c009104d251e79fef894a37ba not found: ID does not exist" Oct 02 11:17:59 crc kubenswrapper[4783]: I1002 11:17:59.565673 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="733b95bc-bf98-4482-ade1-ebabbbcc6cf1" path="/var/lib/kubelet/pods/733b95bc-bf98-4482-ade1-ebabbbcc6cf1/volumes" Oct 02 11:17:59 crc kubenswrapper[4783]: I1002 11:17:59.999313 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0" Oct 02 11:18:00 crc kubenswrapper[4783]: I1002 11:18:00.632264 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-m7lqg"] Oct 02 11:18:00 crc kubenswrapper[4783]: E1002 11:18:00.636500 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="733b95bc-bf98-4482-ade1-ebabbbcc6cf1" containerName="extract-utilities" Oct 02 11:18:00 crc kubenswrapper[4783]: I1002 11:18:00.636586 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="733b95bc-bf98-4482-ade1-ebabbbcc6cf1" containerName="extract-utilities" Oct 02 11:18:00 crc kubenswrapper[4783]: E1002 11:18:00.636609 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="733b95bc-bf98-4482-ade1-ebabbbcc6cf1" containerName="registry-server" Oct 02 11:18:00 crc kubenswrapper[4783]: I1002 11:18:00.636617 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="733b95bc-bf98-4482-ade1-ebabbbcc6cf1" containerName="registry-server" Oct 02 11:18:00 crc kubenswrapper[4783]: E1002 11:18:00.636641 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="733b95bc-bf98-4482-ade1-ebabbbcc6cf1" containerName="extract-content" Oct 02 11:18:00 crc kubenswrapper[4783]: I1002 11:18:00.636655 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="733b95bc-bf98-4482-ade1-ebabbbcc6cf1" containerName="extract-content" Oct 02 11:18:00 crc kubenswrapper[4783]: I1002 11:18:00.636907 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="733b95bc-bf98-4482-ade1-ebabbbcc6cf1" containerName="registry-server" Oct 02 11:18:00 crc kubenswrapper[4783]: I1002 11:18:00.639254 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-m7lqg" Oct 02 11:18:00 crc kubenswrapper[4783]: I1002 11:18:00.643804 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data" Oct 02 11:18:00 crc kubenswrapper[4783]: I1002 11:18:00.646374 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-m7lqg"] Oct 02 11:18:00 crc kubenswrapper[4783]: I1002 11:18:00.660172 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts" Oct 02 11:18:00 crc kubenswrapper[4783]: I1002 11:18:00.719177 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a1d4a6fe-7b83-4b7a-8b8f-93006697d03a-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-m7lqg\" (UID: \"a1d4a6fe-7b83-4b7a-8b8f-93006697d03a\") " pod="openstack/nova-cell0-cell-mapping-m7lqg" Oct 02 11:18:00 crc kubenswrapper[4783]: I1002 11:18:00.719353 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a1d4a6fe-7b83-4b7a-8b8f-93006697d03a-config-data\") pod \"nova-cell0-cell-mapping-m7lqg\" (UID: \"a1d4a6fe-7b83-4b7a-8b8f-93006697d03a\") " pod="openstack/nova-cell0-cell-mapping-m7lqg" Oct 02 11:18:00 crc kubenswrapper[4783]: I1002 11:18:00.719391 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a1d4a6fe-7b83-4b7a-8b8f-93006697d03a-scripts\") pod \"nova-cell0-cell-mapping-m7lqg\" (UID: \"a1d4a6fe-7b83-4b7a-8b8f-93006697d03a\") " pod="openstack/nova-cell0-cell-mapping-m7lqg" Oct 02 11:18:00 crc kubenswrapper[4783]: I1002 11:18:00.719536 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pxtrh\" (UniqueName: \"kubernetes.io/projected/a1d4a6fe-7b83-4b7a-8b8f-93006697d03a-kube-api-access-pxtrh\") pod \"nova-cell0-cell-mapping-m7lqg\" (UID: \"a1d4a6fe-7b83-4b7a-8b8f-93006697d03a\") " pod="openstack/nova-cell0-cell-mapping-m7lqg" Oct 02 11:18:00 crc kubenswrapper[4783]: I1002 11:18:00.821765 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a1d4a6fe-7b83-4b7a-8b8f-93006697d03a-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-m7lqg\" (UID: \"a1d4a6fe-7b83-4b7a-8b8f-93006697d03a\") " pod="openstack/nova-cell0-cell-mapping-m7lqg" Oct 02 11:18:00 crc kubenswrapper[4783]: I1002 11:18:00.821902 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a1d4a6fe-7b83-4b7a-8b8f-93006697d03a-config-data\") pod \"nova-cell0-cell-mapping-m7lqg\" (UID: \"a1d4a6fe-7b83-4b7a-8b8f-93006697d03a\") " pod="openstack/nova-cell0-cell-mapping-m7lqg" Oct 02 11:18:00 crc kubenswrapper[4783]: I1002 11:18:00.821931 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a1d4a6fe-7b83-4b7a-8b8f-93006697d03a-scripts\") pod \"nova-cell0-cell-mapping-m7lqg\" (UID: \"a1d4a6fe-7b83-4b7a-8b8f-93006697d03a\") " pod="openstack/nova-cell0-cell-mapping-m7lqg" Oct 02 11:18:00 crc kubenswrapper[4783]: I1002 11:18:00.821959 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pxtrh\" (UniqueName: 
\"kubernetes.io/projected/a1d4a6fe-7b83-4b7a-8b8f-93006697d03a-kube-api-access-pxtrh\") pod \"nova-cell0-cell-mapping-m7lqg\" (UID: \"a1d4a6fe-7b83-4b7a-8b8f-93006697d03a\") " pod="openstack/nova-cell0-cell-mapping-m7lqg" Oct 02 11:18:00 crc kubenswrapper[4783]: I1002 11:18:00.830577 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a1d4a6fe-7b83-4b7a-8b8f-93006697d03a-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-m7lqg\" (UID: \"a1d4a6fe-7b83-4b7a-8b8f-93006697d03a\") " pod="openstack/nova-cell0-cell-mapping-m7lqg" Oct 02 11:18:00 crc kubenswrapper[4783]: I1002 11:18:00.832027 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a1d4a6fe-7b83-4b7a-8b8f-93006697d03a-config-data\") pod \"nova-cell0-cell-mapping-m7lqg\" (UID: \"a1d4a6fe-7b83-4b7a-8b8f-93006697d03a\") " pod="openstack/nova-cell0-cell-mapping-m7lqg" Oct 02 11:18:00 crc kubenswrapper[4783]: I1002 11:18:00.862633 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a1d4a6fe-7b83-4b7a-8b8f-93006697d03a-scripts\") pod \"nova-cell0-cell-mapping-m7lqg\" (UID: \"a1d4a6fe-7b83-4b7a-8b8f-93006697d03a\") " pod="openstack/nova-cell0-cell-mapping-m7lqg" Oct 02 11:18:00 crc kubenswrapper[4783]: I1002 11:18:00.872746 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Oct 02 11:18:00 crc kubenswrapper[4783]: I1002 11:18:00.874246 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Oct 02 11:18:00 crc kubenswrapper[4783]: I1002 11:18:00.879097 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Oct 02 11:18:00 crc kubenswrapper[4783]: I1002 11:18:00.881667 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pxtrh\" (UniqueName: \"kubernetes.io/projected/a1d4a6fe-7b83-4b7a-8b8f-93006697d03a-kube-api-access-pxtrh\") pod \"nova-cell0-cell-mapping-m7lqg\" (UID: \"a1d4a6fe-7b83-4b7a-8b8f-93006697d03a\") " pod="openstack/nova-cell0-cell-mapping-m7lqg" Oct 02 11:18:00 crc kubenswrapper[4783]: I1002 11:18:00.926977 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xlkqb\" (UniqueName: \"kubernetes.io/projected/6a18cba8-6810-488a-8538-ef42278f7162-kube-api-access-xlkqb\") pod \"nova-scheduler-0\" (UID: \"6a18cba8-6810-488a-8538-ef42278f7162\") " pod="openstack/nova-scheduler-0" Oct 02 11:18:00 crc kubenswrapper[4783]: I1002 11:18:00.927031 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6a18cba8-6810-488a-8538-ef42278f7162-config-data\") pod \"nova-scheduler-0\" (UID: \"6a18cba8-6810-488a-8538-ef42278f7162\") " pod="openstack/nova-scheduler-0" Oct 02 11:18:00 crc kubenswrapper[4783]: I1002 11:18:00.927108 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a18cba8-6810-488a-8538-ef42278f7162-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"6a18cba8-6810-488a-8538-ef42278f7162\") " pod="openstack/nova-scheduler-0" Oct 02 11:18:00 crc kubenswrapper[4783]: I1002 11:18:00.929648 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] 
Oct 02 11:18:00 crc kubenswrapper[4783]: I1002 11:18:00.947716 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Oct 02 11:18:00 crc kubenswrapper[4783]: I1002 11:18:00.949368 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Oct 02 11:18:00 crc kubenswrapper[4783]: I1002 11:18:00.958637 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Oct 02 11:18:00 crc kubenswrapper[4783]: I1002 11:18:00.968841 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-m7lqg" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.010922 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.029110 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6a18cba8-6810-488a-8538-ef42278f7162-config-data\") pod \"nova-scheduler-0\" (UID: \"6a18cba8-6810-488a-8538-ef42278f7162\") " pod="openstack/nova-scheduler-0" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.029214 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/181e715a-cbe0-49a9-99af-670ff225dcf2-config-data\") pod \"nova-api-0\" (UID: \"181e715a-cbe0-49a9-99af-670ff225dcf2\") " pod="openstack/nova-api-0" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.029245 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a18cba8-6810-488a-8538-ef42278f7162-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"6a18cba8-6810-488a-8538-ef42278f7162\") " pod="openstack/nova-scheduler-0" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.029288 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/181e715a-cbe0-49a9-99af-670ff225dcf2-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"181e715a-cbe0-49a9-99af-670ff225dcf2\") " pod="openstack/nova-api-0" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.029304 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/181e715a-cbe0-49a9-99af-670ff225dcf2-logs\") pod \"nova-api-0\" (UID: \"181e715a-cbe0-49a9-99af-670ff225dcf2\") " pod="openstack/nova-api-0" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.029328 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9fjnh\" (UniqueName: \"kubernetes.io/projected/181e715a-cbe0-49a9-99af-670ff225dcf2-kube-api-access-9fjnh\") pod \"nova-api-0\" (UID: \"181e715a-cbe0-49a9-99af-670ff225dcf2\") " pod="openstack/nova-api-0" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.029379 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xlkqb\" (UniqueName: \"kubernetes.io/projected/6a18cba8-6810-488a-8538-ef42278f7162-kube-api-access-xlkqb\") pod \"nova-scheduler-0\" (UID: \"6a18cba8-6810-488a-8538-ef42278f7162\") " pod="openstack/nova-scheduler-0" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.030958 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Oct 02 
11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.041106 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.041243 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6a18cba8-6810-488a-8538-ef42278f7162-config-data\") pod \"nova-scheduler-0\" (UID: \"6a18cba8-6810-488a-8538-ef42278f7162\") " pod="openstack/nova-scheduler-0" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.041310 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a18cba8-6810-488a-8538-ef42278f7162-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"6a18cba8-6810-488a-8538-ef42278f7162\") " pod="openstack/nova-scheduler-0" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.054279 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.086264 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.098549 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xlkqb\" (UniqueName: \"kubernetes.io/projected/6a18cba8-6810-488a-8538-ef42278f7162-kube-api-access-xlkqb\") pod \"nova-scheduler-0\" (UID: \"6a18cba8-6810-488a-8538-ef42278f7162\") " pod="openstack/nova-scheduler-0" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.134230 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9fjnh\" (UniqueName: \"kubernetes.io/projected/181e715a-cbe0-49a9-99af-670ff225dcf2-kube-api-access-9fjnh\") pod \"nova-api-0\" (UID: \"181e715a-cbe0-49a9-99af-670ff225dcf2\") " pod="openstack/nova-api-0" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.134331 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wgzn8\" (UniqueName: \"kubernetes.io/projected/aecd88b6-c737-4b6a-abfe-e9e0b9f5f14b-kube-api-access-wgzn8\") pod \"nova-cell1-novncproxy-0\" (UID: \"aecd88b6-c737-4b6a-abfe-e9e0b9f5f14b\") " pod="openstack/nova-cell1-novncproxy-0" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.134373 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aecd88b6-c737-4b6a-abfe-e9e0b9f5f14b-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"aecd88b6-c737-4b6a-abfe-e9e0b9f5f14b\") " pod="openstack/nova-cell1-novncproxy-0" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.134458 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/181e715a-cbe0-49a9-99af-670ff225dcf2-config-data\") pod \"nova-api-0\" (UID: \"181e715a-cbe0-49a9-99af-670ff225dcf2\") " pod="openstack/nova-api-0" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.134509 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aecd88b6-c737-4b6a-abfe-e9e0b9f5f14b-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"aecd88b6-c737-4b6a-abfe-e9e0b9f5f14b\") " pod="openstack/nova-cell1-novncproxy-0" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 
11:18:01.134530 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/181e715a-cbe0-49a9-99af-670ff225dcf2-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"181e715a-cbe0-49a9-99af-670ff225dcf2\") " pod="openstack/nova-api-0" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.134546 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/181e715a-cbe0-49a9-99af-670ff225dcf2-logs\") pod \"nova-api-0\" (UID: \"181e715a-cbe0-49a9-99af-670ff225dcf2\") " pod="openstack/nova-api-0" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.135081 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/181e715a-cbe0-49a9-99af-670ff225dcf2-logs\") pod \"nova-api-0\" (UID: \"181e715a-cbe0-49a9-99af-670ff225dcf2\") " pod="openstack/nova-api-0" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.143156 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/181e715a-cbe0-49a9-99af-670ff225dcf2-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"181e715a-cbe0-49a9-99af-670ff225dcf2\") " pod="openstack/nova-api-0" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.152941 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/181e715a-cbe0-49a9-99af-670ff225dcf2-config-data\") pod \"nova-api-0\" (UID: \"181e715a-cbe0-49a9-99af-670ff225dcf2\") " pod="openstack/nova-api-0" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.166923 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9fjnh\" (UniqueName: \"kubernetes.io/projected/181e715a-cbe0-49a9-99af-670ff225dcf2-kube-api-access-9fjnh\") pod \"nova-api-0\" (UID: \"181e715a-cbe0-49a9-99af-670ff225dcf2\") " pod="openstack/nova-api-0" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.192137 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.193712 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.195167 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.199046 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.236172 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mgj7z\" (UniqueName: \"kubernetes.io/projected/0885d8af-616a-47ab-aa88-37dc12e0edf0-kube-api-access-mgj7z\") pod \"nova-metadata-0\" (UID: \"0885d8af-616a-47ab-aa88-37dc12e0edf0\") " pod="openstack/nova-metadata-0" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.236270 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0885d8af-616a-47ab-aa88-37dc12e0edf0-config-data\") pod \"nova-metadata-0\" (UID: \"0885d8af-616a-47ab-aa88-37dc12e0edf0\") " pod="openstack/nova-metadata-0" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.236313 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aecd88b6-c737-4b6a-abfe-e9e0b9f5f14b-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"aecd88b6-c737-4b6a-abfe-e9e0b9f5f14b\") " pod="openstack/nova-cell1-novncproxy-0" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.236421 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0885d8af-616a-47ab-aa88-37dc12e0edf0-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"0885d8af-616a-47ab-aa88-37dc12e0edf0\") " pod="openstack/nova-metadata-0" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.236441 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0885d8af-616a-47ab-aa88-37dc12e0edf0-logs\") pod \"nova-metadata-0\" (UID: \"0885d8af-616a-47ab-aa88-37dc12e0edf0\") " pod="openstack/nova-metadata-0" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.236461 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wgzn8\" (UniqueName: \"kubernetes.io/projected/aecd88b6-c737-4b6a-abfe-e9e0b9f5f14b-kube-api-access-wgzn8\") pod \"nova-cell1-novncproxy-0\" (UID: \"aecd88b6-c737-4b6a-abfe-e9e0b9f5f14b\") " pod="openstack/nova-cell1-novncproxy-0" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.236483 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aecd88b6-c737-4b6a-abfe-e9e0b9f5f14b-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"aecd88b6-c737-4b6a-abfe-e9e0b9f5f14b\") " pod="openstack/nova-cell1-novncproxy-0" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.244710 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aecd88b6-c737-4b6a-abfe-e9e0b9f5f14b-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"aecd88b6-c737-4b6a-abfe-e9e0b9f5f14b\") " pod="openstack/nova-cell1-novncproxy-0" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.245241 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aecd88b6-c737-4b6a-abfe-e9e0b9f5f14b-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"aecd88b6-c737-4b6a-abfe-e9e0b9f5f14b\") " pod="openstack/nova-cell1-novncproxy-0" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.286961 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wgzn8\" (UniqueName: \"kubernetes.io/projected/aecd88b6-c737-4b6a-abfe-e9e0b9f5f14b-kube-api-access-wgzn8\") pod \"nova-cell1-novncproxy-0\" (UID: \"aecd88b6-c737-4b6a-abfe-e9e0b9f5f14b\") " pod="openstack/nova-cell1-novncproxy-0" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.298698 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.338571 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0885d8af-616a-47ab-aa88-37dc12e0edf0-config-data\") pod \"nova-metadata-0\" (UID: \"0885d8af-616a-47ab-aa88-37dc12e0edf0\") " pod="openstack/nova-metadata-0" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.338933 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0885d8af-616a-47ab-aa88-37dc12e0edf0-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"0885d8af-616a-47ab-aa88-37dc12e0edf0\") " pod="openstack/nova-metadata-0" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.338958 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0885d8af-616a-47ab-aa88-37dc12e0edf0-logs\") pod \"nova-metadata-0\" (UID: \"0885d8af-616a-47ab-aa88-37dc12e0edf0\") " pod="openstack/nova-metadata-0" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.338995 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mgj7z\" (UniqueName: \"kubernetes.io/projected/0885d8af-616a-47ab-aa88-37dc12e0edf0-kube-api-access-mgj7z\") pod \"nova-metadata-0\" (UID: \"0885d8af-616a-47ab-aa88-37dc12e0edf0\") " pod="openstack/nova-metadata-0" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.341381 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0885d8af-616a-47ab-aa88-37dc12e0edf0-logs\") pod \"nova-metadata-0\" (UID: \"0885d8af-616a-47ab-aa88-37dc12e0edf0\") " pod="openstack/nova-metadata-0" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.368046 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-845d6d6f59-76xxs"] Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.369993 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-845d6d6f59-76xxs" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.389982 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0885d8af-616a-47ab-aa88-37dc12e0edf0-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"0885d8af-616a-47ab-aa88-37dc12e0edf0\") " pod="openstack/nova-metadata-0" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.394310 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mgj7z\" (UniqueName: \"kubernetes.io/projected/0885d8af-616a-47ab-aa88-37dc12e0edf0-kube-api-access-mgj7z\") pod \"nova-metadata-0\" (UID: \"0885d8af-616a-47ab-aa88-37dc12e0edf0\") " pod="openstack/nova-metadata-0" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.416406 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0885d8af-616a-47ab-aa88-37dc12e0edf0-config-data\") pod \"nova-metadata-0\" (UID: \"0885d8af-616a-47ab-aa88-37dc12e0edf0\") " pod="openstack/nova-metadata-0" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.420752 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-845d6d6f59-76xxs"] Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.446210 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ae944d77-12f5-47c5-90b3-916ff3ca9e91-ovsdbserver-sb\") pod \"dnsmasq-dns-845d6d6f59-76xxs\" (UID: \"ae944d77-12f5-47c5-90b3-916ff3ca9e91\") " pod="openstack/dnsmasq-dns-845d6d6f59-76xxs" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.446368 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ae944d77-12f5-47c5-90b3-916ff3ca9e91-ovsdbserver-nb\") pod \"dnsmasq-dns-845d6d6f59-76xxs\" (UID: \"ae944d77-12f5-47c5-90b3-916ff3ca9e91\") " pod="openstack/dnsmasq-dns-845d6d6f59-76xxs" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.446706 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ae944d77-12f5-47c5-90b3-916ff3ca9e91-dns-svc\") pod \"dnsmasq-dns-845d6d6f59-76xxs\" (UID: \"ae944d77-12f5-47c5-90b3-916ff3ca9e91\") " pod="openstack/dnsmasq-dns-845d6d6f59-76xxs" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.446841 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ae944d77-12f5-47c5-90b3-916ff3ca9e91-config\") pod \"dnsmasq-dns-845d6d6f59-76xxs\" (UID: \"ae944d77-12f5-47c5-90b3-916ff3ca9e91\") " pod="openstack/dnsmasq-dns-845d6d6f59-76xxs" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.446928 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ae944d77-12f5-47c5-90b3-916ff3ca9e91-dns-swift-storage-0\") pod \"dnsmasq-dns-845d6d6f59-76xxs\" (UID: \"ae944d77-12f5-47c5-90b3-916ff3ca9e91\") " pod="openstack/dnsmasq-dns-845d6d6f59-76xxs" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.447182 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h5cwk\" (UniqueName: 
\"kubernetes.io/projected/ae944d77-12f5-47c5-90b3-916ff3ca9e91-kube-api-access-h5cwk\") pod \"dnsmasq-dns-845d6d6f59-76xxs\" (UID: \"ae944d77-12f5-47c5-90b3-916ff3ca9e91\") " pod="openstack/dnsmasq-dns-845d6d6f59-76xxs" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.455907 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.471853 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.529279 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.548486 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ae944d77-12f5-47c5-90b3-916ff3ca9e91-ovsdbserver-sb\") pod \"dnsmasq-dns-845d6d6f59-76xxs\" (UID: \"ae944d77-12f5-47c5-90b3-916ff3ca9e91\") " pod="openstack/dnsmasq-dns-845d6d6f59-76xxs" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.548538 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ae944d77-12f5-47c5-90b3-916ff3ca9e91-ovsdbserver-nb\") pod \"dnsmasq-dns-845d6d6f59-76xxs\" (UID: \"ae944d77-12f5-47c5-90b3-916ff3ca9e91\") " pod="openstack/dnsmasq-dns-845d6d6f59-76xxs" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.548585 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ae944d77-12f5-47c5-90b3-916ff3ca9e91-dns-svc\") pod \"dnsmasq-dns-845d6d6f59-76xxs\" (UID: \"ae944d77-12f5-47c5-90b3-916ff3ca9e91\") " pod="openstack/dnsmasq-dns-845d6d6f59-76xxs" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.548646 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ae944d77-12f5-47c5-90b3-916ff3ca9e91-config\") pod \"dnsmasq-dns-845d6d6f59-76xxs\" (UID: \"ae944d77-12f5-47c5-90b3-916ff3ca9e91\") " pod="openstack/dnsmasq-dns-845d6d6f59-76xxs" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.549666 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ae944d77-12f5-47c5-90b3-916ff3ca9e91-dns-svc\") pod \"dnsmasq-dns-845d6d6f59-76xxs\" (UID: \"ae944d77-12f5-47c5-90b3-916ff3ca9e91\") " pod="openstack/dnsmasq-dns-845d6d6f59-76xxs" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.549682 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ae944d77-12f5-47c5-90b3-916ff3ca9e91-ovsdbserver-nb\") pod \"dnsmasq-dns-845d6d6f59-76xxs\" (UID: \"ae944d77-12f5-47c5-90b3-916ff3ca9e91\") " pod="openstack/dnsmasq-dns-845d6d6f59-76xxs" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.549722 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ae944d77-12f5-47c5-90b3-916ff3ca9e91-dns-swift-storage-0\") pod \"dnsmasq-dns-845d6d6f59-76xxs\" (UID: \"ae944d77-12f5-47c5-90b3-916ff3ca9e91\") " pod="openstack/dnsmasq-dns-845d6d6f59-76xxs" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.550075 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"kube-api-access-h5cwk\" (UniqueName: \"kubernetes.io/projected/ae944d77-12f5-47c5-90b3-916ff3ca9e91-kube-api-access-h5cwk\") pod \"dnsmasq-dns-845d6d6f59-76xxs\" (UID: \"ae944d77-12f5-47c5-90b3-916ff3ca9e91\") " pod="openstack/dnsmasq-dns-845d6d6f59-76xxs" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.550932 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ae944d77-12f5-47c5-90b3-916ff3ca9e91-config\") pod \"dnsmasq-dns-845d6d6f59-76xxs\" (UID: \"ae944d77-12f5-47c5-90b3-916ff3ca9e91\") " pod="openstack/dnsmasq-dns-845d6d6f59-76xxs" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.551558 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ae944d77-12f5-47c5-90b3-916ff3ca9e91-dns-swift-storage-0\") pod \"dnsmasq-dns-845d6d6f59-76xxs\" (UID: \"ae944d77-12f5-47c5-90b3-916ff3ca9e91\") " pod="openstack/dnsmasq-dns-845d6d6f59-76xxs" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.551783 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ae944d77-12f5-47c5-90b3-916ff3ca9e91-ovsdbserver-sb\") pod \"dnsmasq-dns-845d6d6f59-76xxs\" (UID: \"ae944d77-12f5-47c5-90b3-916ff3ca9e91\") " pod="openstack/dnsmasq-dns-845d6d6f59-76xxs" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.576764 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h5cwk\" (UniqueName: \"kubernetes.io/projected/ae944d77-12f5-47c5-90b3-916ff3ca9e91-kube-api-access-h5cwk\") pod \"dnsmasq-dns-845d6d6f59-76xxs\" (UID: \"ae944d77-12f5-47c5-90b3-916ff3ca9e91\") " pod="openstack/dnsmasq-dns-845d6d6f59-76xxs" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.714235 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-845d6d6f59-76xxs" Oct 02 11:18:01 crc kubenswrapper[4783]: I1002 11:18:01.816046 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-m7lqg"] Oct 02 11:18:02 crc kubenswrapper[4783]: W1002 11:18:02.095812 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6a18cba8_6810_488a_8538_ef42278f7162.slice/crio-43e38f40bc1f36cc4d86a8ca0706117712acf02e4f3b3f2c61fa095e5d453617 WatchSource:0}: Error finding container 43e38f40bc1f36cc4d86a8ca0706117712acf02e4f3b3f2c61fa095e5d453617: Status 404 returned error can't find the container with id 43e38f40bc1f36cc4d86a8ca0706117712acf02e4f3b3f2c61fa095e5d453617 Oct 02 11:18:02 crc kubenswrapper[4783]: I1002 11:18:02.097075 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Oct 02 11:18:02 crc kubenswrapper[4783]: I1002 11:18:02.299723 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Oct 02 11:18:02 crc kubenswrapper[4783]: I1002 11:18:02.337776 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Oct 02 11:18:02 crc kubenswrapper[4783]: W1002 11:18:02.343195 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podaecd88b6_c737_4b6a_abfe_e9e0b9f5f14b.slice/crio-72f9bb032bd2eaa1af5ce1f085772cb13fa786036422b79cfd2aae762e84a9c7 WatchSource:0}: Error finding container 72f9bb032bd2eaa1af5ce1f085772cb13fa786036422b79cfd2aae762e84a9c7: Status 404 returned error can't find the container with id 72f9bb032bd2eaa1af5ce1f085772cb13fa786036422b79cfd2aae762e84a9c7 Oct 02 11:18:02 crc kubenswrapper[4783]: I1002 11:18:02.610823 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Oct 02 11:18:02 crc kubenswrapper[4783]: I1002 11:18:02.632113 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-845d6d6f59-76xxs"] Oct 02 11:18:02 crc kubenswrapper[4783]: I1002 11:18:02.701442 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"aecd88b6-c737-4b6a-abfe-e9e0b9f5f14b","Type":"ContainerStarted","Data":"72f9bb032bd2eaa1af5ce1f085772cb13fa786036422b79cfd2aae762e84a9c7"} Oct 02 11:18:02 crc kubenswrapper[4783]: I1002 11:18:02.704347 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"181e715a-cbe0-49a9-99af-670ff225dcf2","Type":"ContainerStarted","Data":"508ae69d0381225c69783a6c3e70b96d9f06dcddf2d16fd7b93e4ff0795eab20"} Oct 02 11:18:02 crc kubenswrapper[4783]: I1002 11:18:02.706684 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0885d8af-616a-47ab-aa88-37dc12e0edf0","Type":"ContainerStarted","Data":"aab0462e62caafc7f873d967d70c74432957dc8237450116dd4b94b7c4266b0c"} Oct 02 11:18:02 crc kubenswrapper[4783]: I1002 11:18:02.710090 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"6a18cba8-6810-488a-8538-ef42278f7162","Type":"ContainerStarted","Data":"43e38f40bc1f36cc4d86a8ca0706117712acf02e4f3b3f2c61fa095e5d453617"} Oct 02 11:18:02 crc kubenswrapper[4783]: I1002 11:18:02.714921 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-m7lqg" 
event={"ID":"a1d4a6fe-7b83-4b7a-8b8f-93006697d03a","Type":"ContainerStarted","Data":"e0599da871f31fffa19e1bfd33063e9b28ad45179bccfa39bc3d0d2474104dcb"} Oct 02 11:18:02 crc kubenswrapper[4783]: I1002 11:18:02.715159 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-m7lqg" event={"ID":"a1d4a6fe-7b83-4b7a-8b8f-93006697d03a","Type":"ContainerStarted","Data":"3516d89df19879bf6385cc318ae0a40edf7c4ce56ff2d7ad6d1c03c48b0873b2"} Oct 02 11:18:02 crc kubenswrapper[4783]: I1002 11:18:02.749504 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-m7lqg" podStartSLOduration=2.749481761 podStartE2EDuration="2.749481761s" podCreationTimestamp="2025-10-02 11:18:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:18:02.736566158 +0000 UTC m=+1516.052760419" watchObservedRunningTime="2025-10-02 11:18:02.749481761 +0000 UTC m=+1516.065676032" Oct 02 11:18:02 crc kubenswrapper[4783]: I1002 11:18:02.826482 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-28q9h"] Oct 02 11:18:02 crc kubenswrapper[4783]: I1002 11:18:02.828339 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-28q9h" Oct 02 11:18:02 crc kubenswrapper[4783]: I1002 11:18:02.851175 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Oct 02 11:18:02 crc kubenswrapper[4783]: I1002 11:18:02.851534 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Oct 02 11:18:02 crc kubenswrapper[4783]: I1002 11:18:02.871021 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-28q9h"] Oct 02 11:18:02 crc kubenswrapper[4783]: I1002 11:18:02.897512 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/757f9906-d7ab-4ae7-a2f2-5b83bf48b820-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-28q9h\" (UID: \"757f9906-d7ab-4ae7-a2f2-5b83bf48b820\") " pod="openstack/nova-cell1-conductor-db-sync-28q9h" Oct 02 11:18:02 crc kubenswrapper[4783]: I1002 11:18:02.897913 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/757f9906-d7ab-4ae7-a2f2-5b83bf48b820-config-data\") pod \"nova-cell1-conductor-db-sync-28q9h\" (UID: \"757f9906-d7ab-4ae7-a2f2-5b83bf48b820\") " pod="openstack/nova-cell1-conductor-db-sync-28q9h" Oct 02 11:18:02 crc kubenswrapper[4783]: I1002 11:18:02.898092 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/757f9906-d7ab-4ae7-a2f2-5b83bf48b820-scripts\") pod \"nova-cell1-conductor-db-sync-28q9h\" (UID: \"757f9906-d7ab-4ae7-a2f2-5b83bf48b820\") " pod="openstack/nova-cell1-conductor-db-sync-28q9h" Oct 02 11:18:02 crc kubenswrapper[4783]: I1002 11:18:02.898216 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jdjfl\" (UniqueName: \"kubernetes.io/projected/757f9906-d7ab-4ae7-a2f2-5b83bf48b820-kube-api-access-jdjfl\") pod \"nova-cell1-conductor-db-sync-28q9h\" (UID: \"757f9906-d7ab-4ae7-a2f2-5b83bf48b820\") " 
pod="openstack/nova-cell1-conductor-db-sync-28q9h" Oct 02 11:18:02 crc kubenswrapper[4783]: I1002 11:18:02.999835 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/757f9906-d7ab-4ae7-a2f2-5b83bf48b820-config-data\") pod \"nova-cell1-conductor-db-sync-28q9h\" (UID: \"757f9906-d7ab-4ae7-a2f2-5b83bf48b820\") " pod="openstack/nova-cell1-conductor-db-sync-28q9h" Oct 02 11:18:02 crc kubenswrapper[4783]: I1002 11:18:02.999922 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/757f9906-d7ab-4ae7-a2f2-5b83bf48b820-scripts\") pod \"nova-cell1-conductor-db-sync-28q9h\" (UID: \"757f9906-d7ab-4ae7-a2f2-5b83bf48b820\") " pod="openstack/nova-cell1-conductor-db-sync-28q9h" Oct 02 11:18:02 crc kubenswrapper[4783]: I1002 11:18:02.999972 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jdjfl\" (UniqueName: \"kubernetes.io/projected/757f9906-d7ab-4ae7-a2f2-5b83bf48b820-kube-api-access-jdjfl\") pod \"nova-cell1-conductor-db-sync-28q9h\" (UID: \"757f9906-d7ab-4ae7-a2f2-5b83bf48b820\") " pod="openstack/nova-cell1-conductor-db-sync-28q9h" Oct 02 11:18:03 crc kubenswrapper[4783]: I1002 11:18:03.000007 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/757f9906-d7ab-4ae7-a2f2-5b83bf48b820-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-28q9h\" (UID: \"757f9906-d7ab-4ae7-a2f2-5b83bf48b820\") " pod="openstack/nova-cell1-conductor-db-sync-28q9h" Oct 02 11:18:03 crc kubenswrapper[4783]: I1002 11:18:03.008360 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/757f9906-d7ab-4ae7-a2f2-5b83bf48b820-scripts\") pod \"nova-cell1-conductor-db-sync-28q9h\" (UID: \"757f9906-d7ab-4ae7-a2f2-5b83bf48b820\") " pod="openstack/nova-cell1-conductor-db-sync-28q9h" Oct 02 11:18:03 crc kubenswrapper[4783]: I1002 11:18:03.009302 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/757f9906-d7ab-4ae7-a2f2-5b83bf48b820-config-data\") pod \"nova-cell1-conductor-db-sync-28q9h\" (UID: \"757f9906-d7ab-4ae7-a2f2-5b83bf48b820\") " pod="openstack/nova-cell1-conductor-db-sync-28q9h" Oct 02 11:18:03 crc kubenswrapper[4783]: I1002 11:18:03.018828 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jdjfl\" (UniqueName: \"kubernetes.io/projected/757f9906-d7ab-4ae7-a2f2-5b83bf48b820-kube-api-access-jdjfl\") pod \"nova-cell1-conductor-db-sync-28q9h\" (UID: \"757f9906-d7ab-4ae7-a2f2-5b83bf48b820\") " pod="openstack/nova-cell1-conductor-db-sync-28q9h" Oct 02 11:18:03 crc kubenswrapper[4783]: I1002 11:18:03.019393 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/757f9906-d7ab-4ae7-a2f2-5b83bf48b820-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-28q9h\" (UID: \"757f9906-d7ab-4ae7-a2f2-5b83bf48b820\") " pod="openstack/nova-cell1-conductor-db-sync-28q9h" Oct 02 11:18:03 crc kubenswrapper[4783]: I1002 11:18:03.291142 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-28q9h" Oct 02 11:18:03 crc kubenswrapper[4783]: I1002 11:18:03.776089 4783 generic.go:334] "Generic (PLEG): container finished" podID="ae944d77-12f5-47c5-90b3-916ff3ca9e91" containerID="0119868203a1cabafc90042406872130e7a3d5f651ae8564fc600f77af6f94be" exitCode=0 Oct 02 11:18:03 crc kubenswrapper[4783]: I1002 11:18:03.777148 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-845d6d6f59-76xxs" event={"ID":"ae944d77-12f5-47c5-90b3-916ff3ca9e91","Type":"ContainerDied","Data":"0119868203a1cabafc90042406872130e7a3d5f651ae8564fc600f77af6f94be"} Oct 02 11:18:03 crc kubenswrapper[4783]: I1002 11:18:03.777176 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-845d6d6f59-76xxs" event={"ID":"ae944d77-12f5-47c5-90b3-916ff3ca9e91","Type":"ContainerStarted","Data":"cd6f6610aafb6cf9753d733b4416f8927401fcf04582630ecad1e966e9b35e2d"} Oct 02 11:18:04 crc kubenswrapper[4783]: I1002 11:18:04.093488 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-28q9h"] Oct 02 11:18:04 crc kubenswrapper[4783]: W1002 11:18:04.122596 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod757f9906_d7ab_4ae7_a2f2_5b83bf48b820.slice/crio-97dd51a72e6ec18a9856666c42ecd3ac1409c60d0351dca01725c020ab71469d WatchSource:0}: Error finding container 97dd51a72e6ec18a9856666c42ecd3ac1409c60d0351dca01725c020ab71469d: Status 404 returned error can't find the container with id 97dd51a72e6ec18a9856666c42ecd3ac1409c60d0351dca01725c020ab71469d Oct 02 11:18:04 crc kubenswrapper[4783]: I1002 11:18:04.789917 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-845d6d6f59-76xxs" event={"ID":"ae944d77-12f5-47c5-90b3-916ff3ca9e91","Type":"ContainerStarted","Data":"595f0b1a220c6fbd13f0518942708eeee16df3e0a5e1ed7cd30c570b14b0cf45"} Oct 02 11:18:04 crc kubenswrapper[4783]: I1002 11:18:04.790321 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-845d6d6f59-76xxs" Oct 02 11:18:04 crc kubenswrapper[4783]: I1002 11:18:04.797472 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-28q9h" event={"ID":"757f9906-d7ab-4ae7-a2f2-5b83bf48b820","Type":"ContainerStarted","Data":"97dd51a72e6ec18a9856666c42ecd3ac1409c60d0351dca01725c020ab71469d"} Oct 02 11:18:04 crc kubenswrapper[4783]: I1002 11:18:04.816311 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-845d6d6f59-76xxs" podStartSLOduration=3.816293961 podStartE2EDuration="3.816293961s" podCreationTimestamp="2025-10-02 11:18:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:18:04.811535771 +0000 UTC m=+1518.127730032" watchObservedRunningTime="2025-10-02 11:18:04.816293961 +0000 UTC m=+1518.132488222" Oct 02 11:18:05 crc kubenswrapper[4783]: I1002 11:18:05.397666 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Oct 02 11:18:05 crc kubenswrapper[4783]: I1002 11:18:05.421290 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Oct 02 11:18:06 crc kubenswrapper[4783]: I1002 11:18:06.838110 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-28q9h" 
event={"ID":"757f9906-d7ab-4ae7-a2f2-5b83bf48b820","Type":"ContainerStarted","Data":"55e37a7c51cd44639abf5ca8788fa6eb1a731d04d07d1467c686f65391221c4d"} Oct 02 11:18:06 crc kubenswrapper[4783]: I1002 11:18:06.865831 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-28q9h" podStartSLOduration=4.865812106 podStartE2EDuration="4.865812106s" podCreationTimestamp="2025-10-02 11:18:02 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:18:06.859994447 +0000 UTC m=+1520.176188708" watchObservedRunningTime="2025-10-02 11:18:06.865812106 +0000 UTC m=+1520.182006367" Oct 02 11:18:07 crc kubenswrapper[4783]: I1002 11:18:07.848546 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"6a18cba8-6810-488a-8538-ef42278f7162","Type":"ContainerStarted","Data":"4c90ab43907f5581dba9a3f080c210f1d8c2368261fb554155a0ffa7a8e80c1b"} Oct 02 11:18:07 crc kubenswrapper[4783]: I1002 11:18:07.851857 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"aecd88b6-c737-4b6a-abfe-e9e0b9f5f14b","Type":"ContainerStarted","Data":"1c7697d95253002f29d4794a4e15294bbf71fcc3ace3e603cc38a9162c00d75f"} Oct 02 11:18:07 crc kubenswrapper[4783]: I1002 11:18:07.851933 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="aecd88b6-c737-4b6a-abfe-e9e0b9f5f14b" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://1c7697d95253002f29d4794a4e15294bbf71fcc3ace3e603cc38a9162c00d75f" gracePeriod=30 Oct 02 11:18:07 crc kubenswrapper[4783]: I1002 11:18:07.854788 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"181e715a-cbe0-49a9-99af-670ff225dcf2","Type":"ContainerStarted","Data":"6ab0a13eba933032b7ec0af9928652eababe9e3493eb6678a6bb592c39537e09"} Oct 02 11:18:07 crc kubenswrapper[4783]: I1002 11:18:07.854841 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"181e715a-cbe0-49a9-99af-670ff225dcf2","Type":"ContainerStarted","Data":"1bcaba9ce89ba7a63373f5c39aa4836181668dc20e49eeec751bb0dfab952a10"} Oct 02 11:18:07 crc kubenswrapper[4783]: I1002 11:18:07.860240 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="0885d8af-616a-47ab-aa88-37dc12e0edf0" containerName="nova-metadata-log" containerID="cri-o://293c1927e8ddfe2fd7f22e3a2485a7c5eec11cb6f3f033864efa8f08144b623e" gracePeriod=30 Oct 02 11:18:07 crc kubenswrapper[4783]: I1002 11:18:07.860573 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0885d8af-616a-47ab-aa88-37dc12e0edf0","Type":"ContainerStarted","Data":"cf6dce16812624fac8e76b05afda8350340963bfae0d919f8804d75ff3fb2c34"} Oct 02 11:18:07 crc kubenswrapper[4783]: I1002 11:18:07.860604 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0885d8af-616a-47ab-aa88-37dc12e0edf0","Type":"ContainerStarted","Data":"293c1927e8ddfe2fd7f22e3a2485a7c5eec11cb6f3f033864efa8f08144b623e"} Oct 02 11:18:07 crc kubenswrapper[4783]: I1002 11:18:07.860718 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="0885d8af-616a-47ab-aa88-37dc12e0edf0" containerName="nova-metadata-metadata" 
containerID="cri-o://cf6dce16812624fac8e76b05afda8350340963bfae0d919f8804d75ff3fb2c34" gracePeriod=30 Oct 02 11:18:07 crc kubenswrapper[4783]: I1002 11:18:07.871506 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=3.379604998 podStartE2EDuration="7.871483522s" podCreationTimestamp="2025-10-02 11:18:00 +0000 UTC" firstStartedPulling="2025-10-02 11:18:02.10386546 +0000 UTC m=+1515.420059721" lastFinishedPulling="2025-10-02 11:18:06.595743974 +0000 UTC m=+1519.911938245" observedRunningTime="2025-10-02 11:18:07.867648717 +0000 UTC m=+1521.183842988" watchObservedRunningTime="2025-10-02 11:18:07.871483522 +0000 UTC m=+1521.187677783" Oct 02 11:18:07 crc kubenswrapper[4783]: I1002 11:18:07.892462 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.618005393 podStartE2EDuration="7.892436445s" podCreationTimestamp="2025-10-02 11:18:00 +0000 UTC" firstStartedPulling="2025-10-02 11:18:02.321353673 +0000 UTC m=+1515.637547934" lastFinishedPulling="2025-10-02 11:18:06.595784725 +0000 UTC m=+1519.911978986" observedRunningTime="2025-10-02 11:18:07.892361583 +0000 UTC m=+1521.208555844" watchObservedRunningTime="2025-10-02 11:18:07.892436445 +0000 UTC m=+1521.208630706" Oct 02 11:18:07 crc kubenswrapper[4783]: I1002 11:18:07.910467 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.977583655 podStartE2EDuration="6.910440868s" podCreationTimestamp="2025-10-02 11:18:01 +0000 UTC" firstStartedPulling="2025-10-02 11:18:02.667187649 +0000 UTC m=+1515.983381920" lastFinishedPulling="2025-10-02 11:18:06.600044852 +0000 UTC m=+1519.916239133" observedRunningTime="2025-10-02 11:18:07.908783933 +0000 UTC m=+1521.224978194" watchObservedRunningTime="2025-10-02 11:18:07.910440868 +0000 UTC m=+1521.226635129" Oct 02 11:18:07 crc kubenswrapper[4783]: I1002 11:18:07.932958 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=3.687417063 podStartE2EDuration="7.932940514s" podCreationTimestamp="2025-10-02 11:18:00 +0000 UTC" firstStartedPulling="2025-10-02 11:18:02.352481115 +0000 UTC m=+1515.668675376" lastFinishedPulling="2025-10-02 11:18:06.598004546 +0000 UTC m=+1519.914198827" observedRunningTime="2025-10-02 11:18:07.930850477 +0000 UTC m=+1521.247044748" watchObservedRunningTime="2025-10-02 11:18:07.932940514 +0000 UTC m=+1521.249134765" Oct 02 11:18:07 crc kubenswrapper[4783]: I1002 11:18:07.984621 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-5fcdf587dd-wvthh" podUID="8abc2e1e-de94-4880-8a75-0c7ee0a2cdba" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.143:8443/dashboard/auth/login/?next=/dashboard/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 02 11:18:08 crc kubenswrapper[4783]: I1002 11:18:08.118641 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/horizon-567b57d86d-gv6fq" podUID="48c11fb6-76f0-4028-a76f-6f67904bf3aa" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.144:8443/dashboard/auth/login/?next=/dashboard/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 02 11:18:08 crc kubenswrapper[4783]: I1002 11:18:08.877406 4783 generic.go:334] "Generic (PLEG): container finished" podID="0885d8af-616a-47ab-aa88-37dc12e0edf0" 
containerID="cf6dce16812624fac8e76b05afda8350340963bfae0d919f8804d75ff3fb2c34" exitCode=0 Oct 02 11:18:08 crc kubenswrapper[4783]: I1002 11:18:08.877471 4783 generic.go:334] "Generic (PLEG): container finished" podID="0885d8af-616a-47ab-aa88-37dc12e0edf0" containerID="293c1927e8ddfe2fd7f22e3a2485a7c5eec11cb6f3f033864efa8f08144b623e" exitCode=143 Oct 02 11:18:08 crc kubenswrapper[4783]: I1002 11:18:08.878047 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0885d8af-616a-47ab-aa88-37dc12e0edf0","Type":"ContainerDied","Data":"cf6dce16812624fac8e76b05afda8350340963bfae0d919f8804d75ff3fb2c34"} Oct 02 11:18:08 crc kubenswrapper[4783]: I1002 11:18:08.878099 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0885d8af-616a-47ab-aa88-37dc12e0edf0","Type":"ContainerDied","Data":"293c1927e8ddfe2fd7f22e3a2485a7c5eec11cb6f3f033864efa8f08144b623e"} Oct 02 11:18:09 crc kubenswrapper[4783]: I1002 11:18:09.024942 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Oct 02 11:18:09 crc kubenswrapper[4783]: I1002 11:18:09.185128 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0885d8af-616a-47ab-aa88-37dc12e0edf0-config-data\") pod \"0885d8af-616a-47ab-aa88-37dc12e0edf0\" (UID: \"0885d8af-616a-47ab-aa88-37dc12e0edf0\") " Oct 02 11:18:09 crc kubenswrapper[4783]: I1002 11:18:09.185311 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0885d8af-616a-47ab-aa88-37dc12e0edf0-logs\") pod \"0885d8af-616a-47ab-aa88-37dc12e0edf0\" (UID: \"0885d8af-616a-47ab-aa88-37dc12e0edf0\") " Oct 02 11:18:09 crc kubenswrapper[4783]: I1002 11:18:09.185487 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mgj7z\" (UniqueName: \"kubernetes.io/projected/0885d8af-616a-47ab-aa88-37dc12e0edf0-kube-api-access-mgj7z\") pod \"0885d8af-616a-47ab-aa88-37dc12e0edf0\" (UID: \"0885d8af-616a-47ab-aa88-37dc12e0edf0\") " Oct 02 11:18:09 crc kubenswrapper[4783]: I1002 11:18:09.185586 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0885d8af-616a-47ab-aa88-37dc12e0edf0-combined-ca-bundle\") pod \"0885d8af-616a-47ab-aa88-37dc12e0edf0\" (UID: \"0885d8af-616a-47ab-aa88-37dc12e0edf0\") " Oct 02 11:18:09 crc kubenswrapper[4783]: I1002 11:18:09.186245 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0885d8af-616a-47ab-aa88-37dc12e0edf0-logs" (OuterVolumeSpecName: "logs") pod "0885d8af-616a-47ab-aa88-37dc12e0edf0" (UID: "0885d8af-616a-47ab-aa88-37dc12e0edf0"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:18:09 crc kubenswrapper[4783]: I1002 11:18:09.186634 4783 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0885d8af-616a-47ab-aa88-37dc12e0edf0-logs\") on node \"crc\" DevicePath \"\"" Oct 02 11:18:09 crc kubenswrapper[4783]: I1002 11:18:09.209853 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0885d8af-616a-47ab-aa88-37dc12e0edf0-kube-api-access-mgj7z" (OuterVolumeSpecName: "kube-api-access-mgj7z") pod "0885d8af-616a-47ab-aa88-37dc12e0edf0" (UID: "0885d8af-616a-47ab-aa88-37dc12e0edf0"). 
InnerVolumeSpecName "kube-api-access-mgj7z". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:18:09 crc kubenswrapper[4783]: I1002 11:18:09.219541 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0885d8af-616a-47ab-aa88-37dc12e0edf0-config-data" (OuterVolumeSpecName: "config-data") pod "0885d8af-616a-47ab-aa88-37dc12e0edf0" (UID: "0885d8af-616a-47ab-aa88-37dc12e0edf0"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:18:09 crc kubenswrapper[4783]: I1002 11:18:09.229027 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0885d8af-616a-47ab-aa88-37dc12e0edf0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0885d8af-616a-47ab-aa88-37dc12e0edf0" (UID: "0885d8af-616a-47ab-aa88-37dc12e0edf0"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:18:09 crc kubenswrapper[4783]: I1002 11:18:09.293014 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mgj7z\" (UniqueName: \"kubernetes.io/projected/0885d8af-616a-47ab-aa88-37dc12e0edf0-kube-api-access-mgj7z\") on node \"crc\" DevicePath \"\"" Oct 02 11:18:09 crc kubenswrapper[4783]: I1002 11:18:09.293062 4783 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0885d8af-616a-47ab-aa88-37dc12e0edf0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 11:18:09 crc kubenswrapper[4783]: I1002 11:18:09.293076 4783 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0885d8af-616a-47ab-aa88-37dc12e0edf0-config-data\") on node \"crc\" DevicePath \"\"" Oct 02 11:18:09 crc kubenswrapper[4783]: I1002 11:18:09.898828 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"0885d8af-616a-47ab-aa88-37dc12e0edf0","Type":"ContainerDied","Data":"aab0462e62caafc7f873d967d70c74432957dc8237450116dd4b94b7c4266b0c"} Oct 02 11:18:09 crc kubenswrapper[4783]: I1002 11:18:09.898880 4783 scope.go:117] "RemoveContainer" containerID="cf6dce16812624fac8e76b05afda8350340963bfae0d919f8804d75ff3fb2c34" Oct 02 11:18:09 crc kubenswrapper[4783]: I1002 11:18:09.898930 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Oct 02 11:18:09 crc kubenswrapper[4783]: I1002 11:18:09.929197 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Oct 02 11:18:09 crc kubenswrapper[4783]: I1002 11:18:09.943344 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Oct 02 11:18:09 crc kubenswrapper[4783]: I1002 11:18:09.955992 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Oct 02 11:18:09 crc kubenswrapper[4783]: E1002 11:18:09.956533 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0885d8af-616a-47ab-aa88-37dc12e0edf0" containerName="nova-metadata-metadata" Oct 02 11:18:09 crc kubenswrapper[4783]: I1002 11:18:09.956560 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="0885d8af-616a-47ab-aa88-37dc12e0edf0" containerName="nova-metadata-metadata" Oct 02 11:18:09 crc kubenswrapper[4783]: E1002 11:18:09.956576 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0885d8af-616a-47ab-aa88-37dc12e0edf0" containerName="nova-metadata-log" Oct 02 11:18:09 crc kubenswrapper[4783]: I1002 11:18:09.956584 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="0885d8af-616a-47ab-aa88-37dc12e0edf0" containerName="nova-metadata-log" Oct 02 11:18:09 crc kubenswrapper[4783]: I1002 11:18:09.956956 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="0885d8af-616a-47ab-aa88-37dc12e0edf0" containerName="nova-metadata-metadata" Oct 02 11:18:09 crc kubenswrapper[4783]: I1002 11:18:09.957071 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="0885d8af-616a-47ab-aa88-37dc12e0edf0" containerName="nova-metadata-log" Oct 02 11:18:09 crc kubenswrapper[4783]: I1002 11:18:09.958394 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Oct 02 11:18:09 crc kubenswrapper[4783]: I1002 11:18:09.961434 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Oct 02 11:18:09 crc kubenswrapper[4783]: I1002 11:18:09.961698 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Oct 02 11:18:09 crc kubenswrapper[4783]: I1002 11:18:09.962494 4783 scope.go:117] "RemoveContainer" containerID="293c1927e8ddfe2fd7f22e3a2485a7c5eec11cb6f3f033864efa8f08144b623e" Oct 02 11:18:09 crc kubenswrapper[4783]: I1002 11:18:09.985246 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Oct 02 11:18:10 crc kubenswrapper[4783]: I1002 11:18:10.112624 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c2035699-b1a9-4b70-9186-5981a069d8a9-logs\") pod \"nova-metadata-0\" (UID: \"c2035699-b1a9-4b70-9186-5981a069d8a9\") " pod="openstack/nova-metadata-0" Oct 02 11:18:10 crc kubenswrapper[4783]: I1002 11:18:10.112754 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2035699-b1a9-4b70-9186-5981a069d8a9-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"c2035699-b1a9-4b70-9186-5981a069d8a9\") " pod="openstack/nova-metadata-0" Oct 02 11:18:10 crc kubenswrapper[4783]: I1002 11:18:10.112811 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6lj4h\" (UniqueName: \"kubernetes.io/projected/c2035699-b1a9-4b70-9186-5981a069d8a9-kube-api-access-6lj4h\") pod \"nova-metadata-0\" (UID: \"c2035699-b1a9-4b70-9186-5981a069d8a9\") " pod="openstack/nova-metadata-0" Oct 02 11:18:10 crc kubenswrapper[4783]: I1002 11:18:10.112840 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/c2035699-b1a9-4b70-9186-5981a069d8a9-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"c2035699-b1a9-4b70-9186-5981a069d8a9\") " pod="openstack/nova-metadata-0" Oct 02 11:18:10 crc kubenswrapper[4783]: I1002 11:18:10.112900 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2035699-b1a9-4b70-9186-5981a069d8a9-config-data\") pod \"nova-metadata-0\" (UID: \"c2035699-b1a9-4b70-9186-5981a069d8a9\") " pod="openstack/nova-metadata-0" Oct 02 11:18:10 crc kubenswrapper[4783]: I1002 11:18:10.214632 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c2035699-b1a9-4b70-9186-5981a069d8a9-logs\") pod \"nova-metadata-0\" (UID: \"c2035699-b1a9-4b70-9186-5981a069d8a9\") " pod="openstack/nova-metadata-0" Oct 02 11:18:10 crc kubenswrapper[4783]: I1002 11:18:10.214709 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2035699-b1a9-4b70-9186-5981a069d8a9-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"c2035699-b1a9-4b70-9186-5981a069d8a9\") " pod="openstack/nova-metadata-0" Oct 02 11:18:10 crc kubenswrapper[4783]: I1002 11:18:10.214752 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6lj4h\" (UniqueName: 
\"kubernetes.io/projected/c2035699-b1a9-4b70-9186-5981a069d8a9-kube-api-access-6lj4h\") pod \"nova-metadata-0\" (UID: \"c2035699-b1a9-4b70-9186-5981a069d8a9\") " pod="openstack/nova-metadata-0" Oct 02 11:18:10 crc kubenswrapper[4783]: I1002 11:18:10.214771 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/c2035699-b1a9-4b70-9186-5981a069d8a9-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"c2035699-b1a9-4b70-9186-5981a069d8a9\") " pod="openstack/nova-metadata-0" Oct 02 11:18:10 crc kubenswrapper[4783]: I1002 11:18:10.214795 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2035699-b1a9-4b70-9186-5981a069d8a9-config-data\") pod \"nova-metadata-0\" (UID: \"c2035699-b1a9-4b70-9186-5981a069d8a9\") " pod="openstack/nova-metadata-0" Oct 02 11:18:10 crc kubenswrapper[4783]: I1002 11:18:10.215938 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c2035699-b1a9-4b70-9186-5981a069d8a9-logs\") pod \"nova-metadata-0\" (UID: \"c2035699-b1a9-4b70-9186-5981a069d8a9\") " pod="openstack/nova-metadata-0" Oct 02 11:18:10 crc kubenswrapper[4783]: I1002 11:18:10.220117 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/c2035699-b1a9-4b70-9186-5981a069d8a9-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"c2035699-b1a9-4b70-9186-5981a069d8a9\") " pod="openstack/nova-metadata-0" Oct 02 11:18:10 crc kubenswrapper[4783]: I1002 11:18:10.228388 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2035699-b1a9-4b70-9186-5981a069d8a9-config-data\") pod \"nova-metadata-0\" (UID: \"c2035699-b1a9-4b70-9186-5981a069d8a9\") " pod="openstack/nova-metadata-0" Oct 02 11:18:10 crc kubenswrapper[4783]: I1002 11:18:10.230472 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2035699-b1a9-4b70-9186-5981a069d8a9-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"c2035699-b1a9-4b70-9186-5981a069d8a9\") " pod="openstack/nova-metadata-0" Oct 02 11:18:10 crc kubenswrapper[4783]: I1002 11:18:10.234186 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6lj4h\" (UniqueName: \"kubernetes.io/projected/c2035699-b1a9-4b70-9186-5981a069d8a9-kube-api-access-6lj4h\") pod \"nova-metadata-0\" (UID: \"c2035699-b1a9-4b70-9186-5981a069d8a9\") " pod="openstack/nova-metadata-0" Oct 02 11:18:10 crc kubenswrapper[4783]: I1002 11:18:10.284254 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Oct 02 11:18:10 crc kubenswrapper[4783]: I1002 11:18:10.806845 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Oct 02 11:18:10 crc kubenswrapper[4783]: W1002 11:18:10.812266 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc2035699_b1a9_4b70_9186_5981a069d8a9.slice/crio-17f704f6587fd6a1f8873d08c700efc227c0c97c8622dba9fa6ff91a24a47530 WatchSource:0}: Error finding container 17f704f6587fd6a1f8873d08c700efc227c0c97c8622dba9fa6ff91a24a47530: Status 404 returned error can't find the container with id 17f704f6587fd6a1f8873d08c700efc227c0c97c8622dba9fa6ff91a24a47530 Oct 02 11:18:10 crc kubenswrapper[4783]: I1002 11:18:10.909115 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"c2035699-b1a9-4b70-9186-5981a069d8a9","Type":"ContainerStarted","Data":"17f704f6587fd6a1f8873d08c700efc227c0c97c8622dba9fa6ff91a24a47530"} Oct 02 11:18:11 crc kubenswrapper[4783]: I1002 11:18:11.299835 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Oct 02 11:18:11 crc kubenswrapper[4783]: I1002 11:18:11.300146 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Oct 02 11:18:11 crc kubenswrapper[4783]: I1002 11:18:11.336279 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Oct 02 11:18:11 crc kubenswrapper[4783]: I1002 11:18:11.457450 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Oct 02 11:18:11 crc kubenswrapper[4783]: I1002 11:18:11.457498 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Oct 02 11:18:11 crc kubenswrapper[4783]: I1002 11:18:11.472927 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Oct 02 11:18:11 crc kubenswrapper[4783]: I1002 11:18:11.574632 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0885d8af-616a-47ab-aa88-37dc12e0edf0" path="/var/lib/kubelet/pods/0885d8af-616a-47ab-aa88-37dc12e0edf0/volumes" Oct 02 11:18:11 crc kubenswrapper[4783]: I1002 11:18:11.717108 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-845d6d6f59-76xxs" Oct 02 11:18:11 crc kubenswrapper[4783]: I1002 11:18:11.795297 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-4jjjq"] Oct 02 11:18:11 crc kubenswrapper[4783]: I1002 11:18:11.795556 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-5784cf869f-4jjjq" podUID="9340097f-c1f0-4fd6-81eb-155ebf4a319d" containerName="dnsmasq-dns" containerID="cri-o://f6821a9aeb15d7f710351a9e77a6842c399fce71c8e916d6156833251456b072" gracePeriod=10 Oct 02 11:18:11 crc kubenswrapper[4783]: I1002 11:18:11.953756 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"c2035699-b1a9-4b70-9186-5981a069d8a9","Type":"ContainerStarted","Data":"f61beb57cab7fb700298e481f4257de9548d5b4b5dc94649335afba1adab20ab"} Oct 02 11:18:11 crc kubenswrapper[4783]: I1002 11:18:11.953811 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" 
event={"ID":"c2035699-b1a9-4b70-9186-5981a069d8a9","Type":"ContainerStarted","Data":"4656280acd2050da8be54436494d228ab0623c3030898b19374374da4da63638"} Oct 02 11:18:11 crc kubenswrapper[4783]: I1002 11:18:11.967775 4783 generic.go:334] "Generic (PLEG): container finished" podID="9340097f-c1f0-4fd6-81eb-155ebf4a319d" containerID="f6821a9aeb15d7f710351a9e77a6842c399fce71c8e916d6156833251456b072" exitCode=0 Oct 02 11:18:11 crc kubenswrapper[4783]: I1002 11:18:11.968229 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5784cf869f-4jjjq" event={"ID":"9340097f-c1f0-4fd6-81eb-155ebf4a319d","Type":"ContainerDied","Data":"f6821a9aeb15d7f710351a9e77a6842c399fce71c8e916d6156833251456b072"} Oct 02 11:18:12 crc kubenswrapper[4783]: I1002 11:18:12.020666 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.020644505 podStartE2EDuration="3.020644505s" podCreationTimestamp="2025-10-02 11:18:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:18:11.985002679 +0000 UTC m=+1525.301196960" watchObservedRunningTime="2025-10-02 11:18:12.020644505 +0000 UTC m=+1525.336838766" Oct 02 11:18:12 crc kubenswrapper[4783]: I1002 11:18:12.057607 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Oct 02 11:18:12 crc kubenswrapper[4783]: I1002 11:18:12.063813 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-5784cf869f-4jjjq" podUID="9340097f-c1f0-4fd6-81eb-155ebf4a319d" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.163:5353: connect: connection refused" Oct 02 11:18:12 crc kubenswrapper[4783]: I1002 11:18:12.457981 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5784cf869f-4jjjq" Oct 02 11:18:12 crc kubenswrapper[4783]: I1002 11:18:12.540954 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="181e715a-cbe0-49a9-99af-670ff225dcf2" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.189:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 02 11:18:12 crc kubenswrapper[4783]: I1002 11:18:12.540954 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="181e715a-cbe0-49a9-99af-670ff225dcf2" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.189:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 02 11:18:12 crc kubenswrapper[4783]: I1002 11:18:12.586799 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9340097f-c1f0-4fd6-81eb-155ebf4a319d-ovsdbserver-nb\") pod \"9340097f-c1f0-4fd6-81eb-155ebf4a319d\" (UID: \"9340097f-c1f0-4fd6-81eb-155ebf4a319d\") " Oct 02 11:18:12 crc kubenswrapper[4783]: I1002 11:18:12.586906 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9340097f-c1f0-4fd6-81eb-155ebf4a319d-dns-swift-storage-0\") pod \"9340097f-c1f0-4fd6-81eb-155ebf4a319d\" (UID: \"9340097f-c1f0-4fd6-81eb-155ebf4a319d\") " Oct 02 11:18:12 crc kubenswrapper[4783]: I1002 11:18:12.586971 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zbwjt\" (UniqueName: \"kubernetes.io/projected/9340097f-c1f0-4fd6-81eb-155ebf4a319d-kube-api-access-zbwjt\") pod \"9340097f-c1f0-4fd6-81eb-155ebf4a319d\" (UID: \"9340097f-c1f0-4fd6-81eb-155ebf4a319d\") " Oct 02 11:18:12 crc kubenswrapper[4783]: I1002 11:18:12.587018 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9340097f-c1f0-4fd6-81eb-155ebf4a319d-dns-svc\") pod \"9340097f-c1f0-4fd6-81eb-155ebf4a319d\" (UID: \"9340097f-c1f0-4fd6-81eb-155ebf4a319d\") " Oct 02 11:18:12 crc kubenswrapper[4783]: I1002 11:18:12.587062 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9340097f-c1f0-4fd6-81eb-155ebf4a319d-config\") pod \"9340097f-c1f0-4fd6-81eb-155ebf4a319d\" (UID: \"9340097f-c1f0-4fd6-81eb-155ebf4a319d\") " Oct 02 11:18:12 crc kubenswrapper[4783]: I1002 11:18:12.587092 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9340097f-c1f0-4fd6-81eb-155ebf4a319d-ovsdbserver-sb\") pod \"9340097f-c1f0-4fd6-81eb-155ebf4a319d\" (UID: \"9340097f-c1f0-4fd6-81eb-155ebf4a319d\") " Oct 02 11:18:12 crc kubenswrapper[4783]: I1002 11:18:12.621192 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9340097f-c1f0-4fd6-81eb-155ebf4a319d-kube-api-access-zbwjt" (OuterVolumeSpecName: "kube-api-access-zbwjt") pod "9340097f-c1f0-4fd6-81eb-155ebf4a319d" (UID: "9340097f-c1f0-4fd6-81eb-155ebf4a319d"). InnerVolumeSpecName "kube-api-access-zbwjt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:18:12 crc kubenswrapper[4783]: I1002 11:18:12.660453 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9340097f-c1f0-4fd6-81eb-155ebf4a319d-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "9340097f-c1f0-4fd6-81eb-155ebf4a319d" (UID: "9340097f-c1f0-4fd6-81eb-155ebf4a319d"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:18:12 crc kubenswrapper[4783]: I1002 11:18:12.675105 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9340097f-c1f0-4fd6-81eb-155ebf4a319d-config" (OuterVolumeSpecName: "config") pod "9340097f-c1f0-4fd6-81eb-155ebf4a319d" (UID: "9340097f-c1f0-4fd6-81eb-155ebf4a319d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:18:12 crc kubenswrapper[4783]: I1002 11:18:12.690386 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zbwjt\" (UniqueName: \"kubernetes.io/projected/9340097f-c1f0-4fd6-81eb-155ebf4a319d-kube-api-access-zbwjt\") on node \"crc\" DevicePath \"\"" Oct 02 11:18:12 crc kubenswrapper[4783]: I1002 11:18:12.690672 4783 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9340097f-c1f0-4fd6-81eb-155ebf4a319d-config\") on node \"crc\" DevicePath \"\"" Oct 02 11:18:12 crc kubenswrapper[4783]: I1002 11:18:12.690817 4783 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/9340097f-c1f0-4fd6-81eb-155ebf4a319d-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 02 11:18:12 crc kubenswrapper[4783]: I1002 11:18:12.706953 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9340097f-c1f0-4fd6-81eb-155ebf4a319d-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "9340097f-c1f0-4fd6-81eb-155ebf4a319d" (UID: "9340097f-c1f0-4fd6-81eb-155ebf4a319d"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:18:12 crc kubenswrapper[4783]: I1002 11:18:12.712880 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9340097f-c1f0-4fd6-81eb-155ebf4a319d-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "9340097f-c1f0-4fd6-81eb-155ebf4a319d" (UID: "9340097f-c1f0-4fd6-81eb-155ebf4a319d"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:18:12 crc kubenswrapper[4783]: I1002 11:18:12.739530 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9340097f-c1f0-4fd6-81eb-155ebf4a319d-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "9340097f-c1f0-4fd6-81eb-155ebf4a319d" (UID: "9340097f-c1f0-4fd6-81eb-155ebf4a319d"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:18:12 crc kubenswrapper[4783]: I1002 11:18:12.792988 4783 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/9340097f-c1f0-4fd6-81eb-155ebf4a319d-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 02 11:18:12 crc kubenswrapper[4783]: I1002 11:18:12.793021 4783 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/9340097f-c1f0-4fd6-81eb-155ebf4a319d-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Oct 02 11:18:12 crc kubenswrapper[4783]: I1002 11:18:12.793033 4783 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/9340097f-c1f0-4fd6-81eb-155ebf4a319d-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 02 11:18:12 crc kubenswrapper[4783]: I1002 11:18:12.979952 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5784cf869f-4jjjq" Oct 02 11:18:12 crc kubenswrapper[4783]: I1002 11:18:12.980130 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5784cf869f-4jjjq" event={"ID":"9340097f-c1f0-4fd6-81eb-155ebf4a319d","Type":"ContainerDied","Data":"d540ae7e371dd17bff3e211ce3ef7df13ce0702176935191ff6097ff62dce1a5"} Oct 02 11:18:12 crc kubenswrapper[4783]: I1002 11:18:12.980429 4783 scope.go:117] "RemoveContainer" containerID="f6821a9aeb15d7f710351a9e77a6842c399fce71c8e916d6156833251456b072" Oct 02 11:18:13 crc kubenswrapper[4783]: I1002 11:18:13.010847 4783 scope.go:117] "RemoveContainer" containerID="3c43d0f89df3267dbde8f25a2198937a6e0c20d55dfad6ff3b5986245e37c978" Oct 02 11:18:13 crc kubenswrapper[4783]: I1002 11:18:13.031801 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-4jjjq"] Oct 02 11:18:13 crc kubenswrapper[4783]: I1002 11:18:13.046425 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5784cf869f-4jjjq"] Oct 02 11:18:13 crc kubenswrapper[4783]: I1002 11:18:13.558675 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9340097f-c1f0-4fd6-81eb-155ebf4a319d" path="/var/lib/kubelet/pods/9340097f-c1f0-4fd6-81eb-155ebf4a319d/volumes" Oct 02 11:18:15 crc kubenswrapper[4783]: I1002 11:18:15.235078 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-5fcdf587dd-wvthh" Oct 02 11:18:15 crc kubenswrapper[4783]: I1002 11:18:15.253674 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-tjmzw"] Oct 02 11:18:15 crc kubenswrapper[4783]: E1002 11:18:15.255270 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9340097f-c1f0-4fd6-81eb-155ebf4a319d" containerName="init" Oct 02 11:18:15 crc kubenswrapper[4783]: I1002 11:18:15.255289 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="9340097f-c1f0-4fd6-81eb-155ebf4a319d" containerName="init" Oct 02 11:18:15 crc kubenswrapper[4783]: E1002 11:18:15.255309 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9340097f-c1f0-4fd6-81eb-155ebf4a319d" containerName="dnsmasq-dns" Oct 02 11:18:15 crc kubenswrapper[4783]: I1002 11:18:15.255315 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="9340097f-c1f0-4fd6-81eb-155ebf4a319d" containerName="dnsmasq-dns" Oct 02 11:18:15 crc kubenswrapper[4783]: I1002 11:18:15.255548 4783 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="9340097f-c1f0-4fd6-81eb-155ebf4a319d" containerName="dnsmasq-dns" Oct 02 11:18:15 crc kubenswrapper[4783]: I1002 11:18:15.256949 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tjmzw" Oct 02 11:18:15 crc kubenswrapper[4783]: I1002 11:18:15.275129 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-tjmzw"] Oct 02 11:18:15 crc kubenswrapper[4783]: I1002 11:18:15.285377 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Oct 02 11:18:15 crc kubenswrapper[4783]: I1002 11:18:15.286237 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Oct 02 11:18:15 crc kubenswrapper[4783]: I1002 11:18:15.313763 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/horizon-567b57d86d-gv6fq" Oct 02 11:18:15 crc kubenswrapper[4783]: I1002 11:18:15.342055 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2d5zn\" (UniqueName: \"kubernetes.io/projected/106281fe-b164-4c5a-9597-34bc34658415-kube-api-access-2d5zn\") pod \"redhat-marketplace-tjmzw\" (UID: \"106281fe-b164-4c5a-9597-34bc34658415\") " pod="openshift-marketplace/redhat-marketplace-tjmzw" Oct 02 11:18:15 crc kubenswrapper[4783]: I1002 11:18:15.342139 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/106281fe-b164-4c5a-9597-34bc34658415-catalog-content\") pod \"redhat-marketplace-tjmzw\" (UID: \"106281fe-b164-4c5a-9597-34bc34658415\") " pod="openshift-marketplace/redhat-marketplace-tjmzw" Oct 02 11:18:15 crc kubenswrapper[4783]: I1002 11:18:15.342248 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/106281fe-b164-4c5a-9597-34bc34658415-utilities\") pod \"redhat-marketplace-tjmzw\" (UID: \"106281fe-b164-4c5a-9597-34bc34658415\") " pod="openshift-marketplace/redhat-marketplace-tjmzw" Oct 02 11:18:15 crc kubenswrapper[4783]: I1002 11:18:15.443690 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/106281fe-b164-4c5a-9597-34bc34658415-catalog-content\") pod \"redhat-marketplace-tjmzw\" (UID: \"106281fe-b164-4c5a-9597-34bc34658415\") " pod="openshift-marketplace/redhat-marketplace-tjmzw" Oct 02 11:18:15 crc kubenswrapper[4783]: I1002 11:18:15.443833 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/106281fe-b164-4c5a-9597-34bc34658415-utilities\") pod \"redhat-marketplace-tjmzw\" (UID: \"106281fe-b164-4c5a-9597-34bc34658415\") " pod="openshift-marketplace/redhat-marketplace-tjmzw" Oct 02 11:18:15 crc kubenswrapper[4783]: I1002 11:18:15.443982 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2d5zn\" (UniqueName: \"kubernetes.io/projected/106281fe-b164-4c5a-9597-34bc34658415-kube-api-access-2d5zn\") pod \"redhat-marketplace-tjmzw\" (UID: \"106281fe-b164-4c5a-9597-34bc34658415\") " pod="openshift-marketplace/redhat-marketplace-tjmzw" Oct 02 11:18:15 crc kubenswrapper[4783]: I1002 11:18:15.444280 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/106281fe-b164-4c5a-9597-34bc34658415-catalog-content\") pod \"redhat-marketplace-tjmzw\" (UID: \"106281fe-b164-4c5a-9597-34bc34658415\") " pod="openshift-marketplace/redhat-marketplace-tjmzw" Oct 02 11:18:15 crc kubenswrapper[4783]: I1002 11:18:15.444449 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/106281fe-b164-4c5a-9597-34bc34658415-utilities\") pod \"redhat-marketplace-tjmzw\" (UID: \"106281fe-b164-4c5a-9597-34bc34658415\") " pod="openshift-marketplace/redhat-marketplace-tjmzw" Oct 02 11:18:15 crc kubenswrapper[4783]: I1002 11:18:15.464756 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2d5zn\" (UniqueName: \"kubernetes.io/projected/106281fe-b164-4c5a-9597-34bc34658415-kube-api-access-2d5zn\") pod \"redhat-marketplace-tjmzw\" (UID: \"106281fe-b164-4c5a-9597-34bc34658415\") " pod="openshift-marketplace/redhat-marketplace-tjmzw" Oct 02 11:18:15 crc kubenswrapper[4783]: I1002 11:18:15.586189 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tjmzw" Oct 02 11:18:16 crc kubenswrapper[4783]: I1002 11:18:16.101337 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-tjmzw"] Oct 02 11:18:17 crc kubenswrapper[4783]: I1002 11:18:17.023635 4783 generic.go:334] "Generic (PLEG): container finished" podID="a1d4a6fe-7b83-4b7a-8b8f-93006697d03a" containerID="e0599da871f31fffa19e1bfd33063e9b28ad45179bccfa39bc3d0d2474104dcb" exitCode=0 Oct 02 11:18:17 crc kubenswrapper[4783]: I1002 11:18:17.023712 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-m7lqg" event={"ID":"a1d4a6fe-7b83-4b7a-8b8f-93006697d03a","Type":"ContainerDied","Data":"e0599da871f31fffa19e1bfd33063e9b28ad45179bccfa39bc3d0d2474104dcb"} Oct 02 11:18:17 crc kubenswrapper[4783]: I1002 11:18:17.027535 4783 generic.go:334] "Generic (PLEG): container finished" podID="106281fe-b164-4c5a-9597-34bc34658415" containerID="ee6f32ecb814597f3a52a21e13f0bc322491efd976e6c2138e04940db5272458" exitCode=0 Oct 02 11:18:17 crc kubenswrapper[4783]: I1002 11:18:17.027575 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tjmzw" event={"ID":"106281fe-b164-4c5a-9597-34bc34658415","Type":"ContainerDied","Data":"ee6f32ecb814597f3a52a21e13f0bc322491efd976e6c2138e04940db5272458"} Oct 02 11:18:17 crc kubenswrapper[4783]: I1002 11:18:17.027597 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tjmzw" event={"ID":"106281fe-b164-4c5a-9597-34bc34658415","Type":"ContainerStarted","Data":"650cae4827bb99e1451e6699ab722119abebf202b2e8160d4e972f5e049e6400"} Oct 02 11:18:17 crc kubenswrapper[4783]: I1002 11:18:17.051005 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-5fcdf587dd-wvthh" Oct 02 11:18:17 crc kubenswrapper[4783]: I1002 11:18:17.085960 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/horizon-567b57d86d-gv6fq" Oct 02 11:18:17 crc kubenswrapper[4783]: I1002 11:18:17.180866 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5fcdf587dd-wvthh"] Oct 02 11:18:18 crc kubenswrapper[4783]: I1002 11:18:18.039119 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tjmzw" 
event={"ID":"106281fe-b164-4c5a-9597-34bc34658415","Type":"ContainerStarted","Data":"e53c9d6f77589e4b8d257b15649cbb3369c1cc7039ae1ebe4d1955564cd131ab"} Oct 02 11:18:18 crc kubenswrapper[4783]: I1002 11:18:18.041109 4783 generic.go:334] "Generic (PLEG): container finished" podID="757f9906-d7ab-4ae7-a2f2-5b83bf48b820" containerID="55e37a7c51cd44639abf5ca8788fa6eb1a731d04d07d1467c686f65391221c4d" exitCode=0 Oct 02 11:18:18 crc kubenswrapper[4783]: I1002 11:18:18.041148 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-28q9h" event={"ID":"757f9906-d7ab-4ae7-a2f2-5b83bf48b820","Type":"ContainerDied","Data":"55e37a7c51cd44639abf5ca8788fa6eb1a731d04d07d1467c686f65391221c4d"} Oct 02 11:18:18 crc kubenswrapper[4783]: I1002 11:18:18.041376 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-5fcdf587dd-wvthh" podUID="8abc2e1e-de94-4880-8a75-0c7ee0a2cdba" containerName="horizon-log" containerID="cri-o://617e73daab1fa7d04c236e9a25fd46b5542b5fb3258901a035760e38676e12d9" gracePeriod=30 Oct 02 11:18:18 crc kubenswrapper[4783]: I1002 11:18:18.041488 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/horizon-5fcdf587dd-wvthh" podUID="8abc2e1e-de94-4880-8a75-0c7ee0a2cdba" containerName="horizon" containerID="cri-o://ab15a3cd7992b45c329572700c99718692ab18197b5fb99e472dd12d4fcae8ec" gracePeriod=30 Oct 02 11:18:18 crc kubenswrapper[4783]: I1002 11:18:18.438377 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-m7lqg" Oct 02 11:18:18 crc kubenswrapper[4783]: I1002 11:18:18.610153 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pxtrh\" (UniqueName: \"kubernetes.io/projected/a1d4a6fe-7b83-4b7a-8b8f-93006697d03a-kube-api-access-pxtrh\") pod \"a1d4a6fe-7b83-4b7a-8b8f-93006697d03a\" (UID: \"a1d4a6fe-7b83-4b7a-8b8f-93006697d03a\") " Oct 02 11:18:18 crc kubenswrapper[4783]: I1002 11:18:18.610274 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a1d4a6fe-7b83-4b7a-8b8f-93006697d03a-combined-ca-bundle\") pod \"a1d4a6fe-7b83-4b7a-8b8f-93006697d03a\" (UID: \"a1d4a6fe-7b83-4b7a-8b8f-93006697d03a\") " Oct 02 11:18:18 crc kubenswrapper[4783]: I1002 11:18:18.610482 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a1d4a6fe-7b83-4b7a-8b8f-93006697d03a-config-data\") pod \"a1d4a6fe-7b83-4b7a-8b8f-93006697d03a\" (UID: \"a1d4a6fe-7b83-4b7a-8b8f-93006697d03a\") " Oct 02 11:18:18 crc kubenswrapper[4783]: I1002 11:18:18.610545 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a1d4a6fe-7b83-4b7a-8b8f-93006697d03a-scripts\") pod \"a1d4a6fe-7b83-4b7a-8b8f-93006697d03a\" (UID: \"a1d4a6fe-7b83-4b7a-8b8f-93006697d03a\") " Oct 02 11:18:18 crc kubenswrapper[4783]: I1002 11:18:18.617057 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a1d4a6fe-7b83-4b7a-8b8f-93006697d03a-scripts" (OuterVolumeSpecName: "scripts") pod "a1d4a6fe-7b83-4b7a-8b8f-93006697d03a" (UID: "a1d4a6fe-7b83-4b7a-8b8f-93006697d03a"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:18:18 crc kubenswrapper[4783]: I1002 11:18:18.617369 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a1d4a6fe-7b83-4b7a-8b8f-93006697d03a-kube-api-access-pxtrh" (OuterVolumeSpecName: "kube-api-access-pxtrh") pod "a1d4a6fe-7b83-4b7a-8b8f-93006697d03a" (UID: "a1d4a6fe-7b83-4b7a-8b8f-93006697d03a"). InnerVolumeSpecName "kube-api-access-pxtrh". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:18:18 crc kubenswrapper[4783]: I1002 11:18:18.650674 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a1d4a6fe-7b83-4b7a-8b8f-93006697d03a-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a1d4a6fe-7b83-4b7a-8b8f-93006697d03a" (UID: "a1d4a6fe-7b83-4b7a-8b8f-93006697d03a"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:18:18 crc kubenswrapper[4783]: I1002 11:18:18.653031 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a1d4a6fe-7b83-4b7a-8b8f-93006697d03a-config-data" (OuterVolumeSpecName: "config-data") pod "a1d4a6fe-7b83-4b7a-8b8f-93006697d03a" (UID: "a1d4a6fe-7b83-4b7a-8b8f-93006697d03a"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:18:18 crc kubenswrapper[4783]: I1002 11:18:18.712851 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pxtrh\" (UniqueName: \"kubernetes.io/projected/a1d4a6fe-7b83-4b7a-8b8f-93006697d03a-kube-api-access-pxtrh\") on node \"crc\" DevicePath \"\"" Oct 02 11:18:18 crc kubenswrapper[4783]: I1002 11:18:18.712892 4783 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a1d4a6fe-7b83-4b7a-8b8f-93006697d03a-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 11:18:18 crc kubenswrapper[4783]: I1002 11:18:18.712908 4783 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a1d4a6fe-7b83-4b7a-8b8f-93006697d03a-config-data\") on node \"crc\" DevicePath \"\"" Oct 02 11:18:18 crc kubenswrapper[4783]: I1002 11:18:18.712921 4783 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/a1d4a6fe-7b83-4b7a-8b8f-93006697d03a-scripts\") on node \"crc\" DevicePath \"\"" Oct 02 11:18:19 crc kubenswrapper[4783]: I1002 11:18:19.050334 4783 generic.go:334] "Generic (PLEG): container finished" podID="106281fe-b164-4c5a-9597-34bc34658415" containerID="e53c9d6f77589e4b8d257b15649cbb3369c1cc7039ae1ebe4d1955564cd131ab" exitCode=0 Oct 02 11:18:19 crc kubenswrapper[4783]: I1002 11:18:19.050439 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tjmzw" event={"ID":"106281fe-b164-4c5a-9597-34bc34658415","Type":"ContainerDied","Data":"e53c9d6f77589e4b8d257b15649cbb3369c1cc7039ae1ebe4d1955564cd131ab"} Oct 02 11:18:19 crc kubenswrapper[4783]: I1002 11:18:19.051740 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-m7lqg" Oct 02 11:18:19 crc kubenswrapper[4783]: I1002 11:18:19.051829 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-m7lqg" event={"ID":"a1d4a6fe-7b83-4b7a-8b8f-93006697d03a","Type":"ContainerDied","Data":"3516d89df19879bf6385cc318ae0a40edf7c4ce56ff2d7ad6d1c03c48b0873b2"} Oct 02 11:18:19 crc kubenswrapper[4783]: I1002 11:18:19.051856 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3516d89df19879bf6385cc318ae0a40edf7c4ce56ff2d7ad6d1c03c48b0873b2" Oct 02 11:18:19 crc kubenswrapper[4783]: I1002 11:18:19.293278 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Oct 02 11:18:19 crc kubenswrapper[4783]: I1002 11:18:19.293803 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="181e715a-cbe0-49a9-99af-670ff225dcf2" containerName="nova-api-log" containerID="cri-o://1bcaba9ce89ba7a63373f5c39aa4836181668dc20e49eeec751bb0dfab952a10" gracePeriod=30 Oct 02 11:18:19 crc kubenswrapper[4783]: I1002 11:18:19.293938 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="181e715a-cbe0-49a9-99af-670ff225dcf2" containerName="nova-api-api" containerID="cri-o://6ab0a13eba933032b7ec0af9928652eababe9e3493eb6678a6bb592c39537e09" gracePeriod=30 Oct 02 11:18:19 crc kubenswrapper[4783]: I1002 11:18:19.311960 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Oct 02 11:18:19 crc kubenswrapper[4783]: I1002 11:18:19.312204 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="6a18cba8-6810-488a-8538-ef42278f7162" containerName="nova-scheduler-scheduler" containerID="cri-o://4c90ab43907f5581dba9a3f080c210f1d8c2368261fb554155a0ffa7a8e80c1b" gracePeriod=30 Oct 02 11:18:19 crc kubenswrapper[4783]: I1002 11:18:19.322620 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Oct 02 11:18:19 crc kubenswrapper[4783]: I1002 11:18:19.322831 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="c2035699-b1a9-4b70-9186-5981a069d8a9" containerName="nova-metadata-log" containerID="cri-o://4656280acd2050da8be54436494d228ab0623c3030898b19374374da4da63638" gracePeriod=30 Oct 02 11:18:19 crc kubenswrapper[4783]: I1002 11:18:19.323179 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="c2035699-b1a9-4b70-9186-5981a069d8a9" containerName="nova-metadata-metadata" containerID="cri-o://f61beb57cab7fb700298e481f4257de9548d5b4b5dc94649335afba1adab20ab" gracePeriod=30 Oct 02 11:18:19 crc kubenswrapper[4783]: I1002 11:18:19.506918 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-28q9h" Oct 02 11:18:19 crc kubenswrapper[4783]: I1002 11:18:19.634929 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/757f9906-d7ab-4ae7-a2f2-5b83bf48b820-combined-ca-bundle\") pod \"757f9906-d7ab-4ae7-a2f2-5b83bf48b820\" (UID: \"757f9906-d7ab-4ae7-a2f2-5b83bf48b820\") " Oct 02 11:18:19 crc kubenswrapper[4783]: I1002 11:18:19.635036 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/757f9906-d7ab-4ae7-a2f2-5b83bf48b820-scripts\") pod \"757f9906-d7ab-4ae7-a2f2-5b83bf48b820\" (UID: \"757f9906-d7ab-4ae7-a2f2-5b83bf48b820\") " Oct 02 11:18:19 crc kubenswrapper[4783]: I1002 11:18:19.635073 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/757f9906-d7ab-4ae7-a2f2-5b83bf48b820-config-data\") pod \"757f9906-d7ab-4ae7-a2f2-5b83bf48b820\" (UID: \"757f9906-d7ab-4ae7-a2f2-5b83bf48b820\") " Oct 02 11:18:19 crc kubenswrapper[4783]: I1002 11:18:19.635119 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jdjfl\" (UniqueName: \"kubernetes.io/projected/757f9906-d7ab-4ae7-a2f2-5b83bf48b820-kube-api-access-jdjfl\") pod \"757f9906-d7ab-4ae7-a2f2-5b83bf48b820\" (UID: \"757f9906-d7ab-4ae7-a2f2-5b83bf48b820\") " Oct 02 11:18:19 crc kubenswrapper[4783]: I1002 11:18:19.645557 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/757f9906-d7ab-4ae7-a2f2-5b83bf48b820-kube-api-access-jdjfl" (OuterVolumeSpecName: "kube-api-access-jdjfl") pod "757f9906-d7ab-4ae7-a2f2-5b83bf48b820" (UID: "757f9906-d7ab-4ae7-a2f2-5b83bf48b820"). InnerVolumeSpecName "kube-api-access-jdjfl". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:18:19 crc kubenswrapper[4783]: I1002 11:18:19.657438 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/757f9906-d7ab-4ae7-a2f2-5b83bf48b820-scripts" (OuterVolumeSpecName: "scripts") pod "757f9906-d7ab-4ae7-a2f2-5b83bf48b820" (UID: "757f9906-d7ab-4ae7-a2f2-5b83bf48b820"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:18:19 crc kubenswrapper[4783]: I1002 11:18:19.668681 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/757f9906-d7ab-4ae7-a2f2-5b83bf48b820-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "757f9906-d7ab-4ae7-a2f2-5b83bf48b820" (UID: "757f9906-d7ab-4ae7-a2f2-5b83bf48b820"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:18:19 crc kubenswrapper[4783]: I1002 11:18:19.734376 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/757f9906-d7ab-4ae7-a2f2-5b83bf48b820-config-data" (OuterVolumeSpecName: "config-data") pod "757f9906-d7ab-4ae7-a2f2-5b83bf48b820" (UID: "757f9906-d7ab-4ae7-a2f2-5b83bf48b820"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:18:19 crc kubenswrapper[4783]: I1002 11:18:19.737986 4783 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/757f9906-d7ab-4ae7-a2f2-5b83bf48b820-scripts\") on node \"crc\" DevicePath \"\"" Oct 02 11:18:19 crc kubenswrapper[4783]: I1002 11:18:19.738015 4783 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/757f9906-d7ab-4ae7-a2f2-5b83bf48b820-config-data\") on node \"crc\" DevicePath \"\"" Oct 02 11:18:19 crc kubenswrapper[4783]: I1002 11:18:19.738025 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jdjfl\" (UniqueName: \"kubernetes.io/projected/757f9906-d7ab-4ae7-a2f2-5b83bf48b820-kube-api-access-jdjfl\") on node \"crc\" DevicePath \"\"" Oct 02 11:18:19 crc kubenswrapper[4783]: I1002 11:18:19.738037 4783 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/757f9906-d7ab-4ae7-a2f2-5b83bf48b820-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 11:18:19 crc kubenswrapper[4783]: I1002 11:18:19.864118 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.043429 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2035699-b1a9-4b70-9186-5981a069d8a9-config-data\") pod \"c2035699-b1a9-4b70-9186-5981a069d8a9\" (UID: \"c2035699-b1a9-4b70-9186-5981a069d8a9\") " Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.043583 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c2035699-b1a9-4b70-9186-5981a069d8a9-logs\") pod \"c2035699-b1a9-4b70-9186-5981a069d8a9\" (UID: \"c2035699-b1a9-4b70-9186-5981a069d8a9\") " Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.043656 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6lj4h\" (UniqueName: \"kubernetes.io/projected/c2035699-b1a9-4b70-9186-5981a069d8a9-kube-api-access-6lj4h\") pod \"c2035699-b1a9-4b70-9186-5981a069d8a9\" (UID: \"c2035699-b1a9-4b70-9186-5981a069d8a9\") " Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.043704 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2035699-b1a9-4b70-9186-5981a069d8a9-combined-ca-bundle\") pod \"c2035699-b1a9-4b70-9186-5981a069d8a9\" (UID: \"c2035699-b1a9-4b70-9186-5981a069d8a9\") " Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.043745 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/c2035699-b1a9-4b70-9186-5981a069d8a9-nova-metadata-tls-certs\") pod \"c2035699-b1a9-4b70-9186-5981a069d8a9\" (UID: \"c2035699-b1a9-4b70-9186-5981a069d8a9\") " Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.044098 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c2035699-b1a9-4b70-9186-5981a069d8a9-logs" (OuterVolumeSpecName: "logs") pod "c2035699-b1a9-4b70-9186-5981a069d8a9" (UID: "c2035699-b1a9-4b70-9186-5981a069d8a9"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.044392 4783 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c2035699-b1a9-4b70-9186-5981a069d8a9-logs\") on node \"crc\" DevicePath \"\"" Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.047726 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c2035699-b1a9-4b70-9186-5981a069d8a9-kube-api-access-6lj4h" (OuterVolumeSpecName: "kube-api-access-6lj4h") pod "c2035699-b1a9-4b70-9186-5981a069d8a9" (UID: "c2035699-b1a9-4b70-9186-5981a069d8a9"). InnerVolumeSpecName "kube-api-access-6lj4h". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.069086 4783 generic.go:334] "Generic (PLEG): container finished" podID="181e715a-cbe0-49a9-99af-670ff225dcf2" containerID="1bcaba9ce89ba7a63373f5c39aa4836181668dc20e49eeec751bb0dfab952a10" exitCode=143 Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.069176 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"181e715a-cbe0-49a9-99af-670ff225dcf2","Type":"ContainerDied","Data":"1bcaba9ce89ba7a63373f5c39aa4836181668dc20e49eeec751bb0dfab952a10"} Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.071192 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-28q9h" event={"ID":"757f9906-d7ab-4ae7-a2f2-5b83bf48b820","Type":"ContainerDied","Data":"97dd51a72e6ec18a9856666c42ecd3ac1409c60d0351dca01725c020ab71469d"} Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.071224 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="97dd51a72e6ec18a9856666c42ecd3ac1409c60d0351dca01725c020ab71469d" Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.071285 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-28q9h" Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.075870 4783 generic.go:334] "Generic (PLEG): container finished" podID="c2035699-b1a9-4b70-9186-5981a069d8a9" containerID="f61beb57cab7fb700298e481f4257de9548d5b4b5dc94649335afba1adab20ab" exitCode=0 Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.075908 4783 generic.go:334] "Generic (PLEG): container finished" podID="c2035699-b1a9-4b70-9186-5981a069d8a9" containerID="4656280acd2050da8be54436494d228ab0623c3030898b19374374da4da63638" exitCode=143 Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.076112 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"c2035699-b1a9-4b70-9186-5981a069d8a9","Type":"ContainerDied","Data":"f61beb57cab7fb700298e481f4257de9548d5b4b5dc94649335afba1adab20ab"} Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.076153 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"c2035699-b1a9-4b70-9186-5981a069d8a9","Type":"ContainerDied","Data":"4656280acd2050da8be54436494d228ab0623c3030898b19374374da4da63638"} Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.076170 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"c2035699-b1a9-4b70-9186-5981a069d8a9","Type":"ContainerDied","Data":"17f704f6587fd6a1f8873d08c700efc227c0c97c8622dba9fa6ff91a24a47530"} Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.076187 4783 scope.go:117] "RemoveContainer" containerID="f61beb57cab7fb700298e481f4257de9548d5b4b5dc94649335afba1adab20ab" Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.076334 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.078396 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2035699-b1a9-4b70-9186-5981a069d8a9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c2035699-b1a9-4b70-9186-5981a069d8a9" (UID: "c2035699-b1a9-4b70-9186-5981a069d8a9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.080179 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2035699-b1a9-4b70-9186-5981a069d8a9-config-data" (OuterVolumeSpecName: "config-data") pod "c2035699-b1a9-4b70-9186-5981a069d8a9" (UID: "c2035699-b1a9-4b70-9186-5981a069d8a9"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.082023 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tjmzw" event={"ID":"106281fe-b164-4c5a-9597-34bc34658415","Type":"ContainerStarted","Data":"1c18e2a2457b5ef1f5b8cc4a6c176b836fb55158d1447eb35374c9e11c8e76e5"} Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.105059 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-tjmzw" podStartSLOduration=2.448564646 podStartE2EDuration="5.105041605s" podCreationTimestamp="2025-10-02 11:18:15 +0000 UTC" firstStartedPulling="2025-10-02 11:18:17.029807676 +0000 UTC m=+1530.346001937" lastFinishedPulling="2025-10-02 11:18:19.686284635 +0000 UTC m=+1533.002478896" observedRunningTime="2025-10-02 11:18:20.103550845 +0000 UTC m=+1533.419745106" watchObservedRunningTime="2025-10-02 11:18:20.105041605 +0000 UTC m=+1533.421235866" Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.109949 4783 scope.go:117] "RemoveContainer" containerID="4656280acd2050da8be54436494d228ab0623c3030898b19374374da4da63638" Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.145784 4783 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c2035699-b1a9-4b70-9186-5981a069d8a9-config-data\") on node \"crc\" DevicePath \"\"" Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.145999 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6lj4h\" (UniqueName: \"kubernetes.io/projected/c2035699-b1a9-4b70-9186-5981a069d8a9-kube-api-access-6lj4h\") on node \"crc\" DevicePath \"\"" Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.146099 4783 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c2035699-b1a9-4b70-9186-5981a069d8a9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.153854 4783 scope.go:117] "RemoveContainer" containerID="f61beb57cab7fb700298e481f4257de9548d5b4b5dc94649335afba1adab20ab" Oct 02 11:18:20 crc kubenswrapper[4783]: E1002 11:18:20.155584 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f61beb57cab7fb700298e481f4257de9548d5b4b5dc94649335afba1adab20ab\": container with ID starting with f61beb57cab7fb700298e481f4257de9548d5b4b5dc94649335afba1adab20ab not found: ID does not exist" containerID="f61beb57cab7fb700298e481f4257de9548d5b4b5dc94649335afba1adab20ab" Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.157283 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f61beb57cab7fb700298e481f4257de9548d5b4b5dc94649335afba1adab20ab"} err="failed to get container status \"f61beb57cab7fb700298e481f4257de9548d5b4b5dc94649335afba1adab20ab\": rpc error: code = NotFound desc = could not find container \"f61beb57cab7fb700298e481f4257de9548d5b4b5dc94649335afba1adab20ab\": container with ID starting with f61beb57cab7fb700298e481f4257de9548d5b4b5dc94649335afba1adab20ab not found: ID does not exist" Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.157323 4783 scope.go:117] "RemoveContainer" containerID="4656280acd2050da8be54436494d228ab0623c3030898b19374374da4da63638" Oct 02 11:18:20 crc kubenswrapper[4783]: E1002 11:18:20.159115 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc 
error: code = NotFound desc = could not find container \"4656280acd2050da8be54436494d228ab0623c3030898b19374374da4da63638\": container with ID starting with 4656280acd2050da8be54436494d228ab0623c3030898b19374374da4da63638 not found: ID does not exist" containerID="4656280acd2050da8be54436494d228ab0623c3030898b19374374da4da63638" Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.159170 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4656280acd2050da8be54436494d228ab0623c3030898b19374374da4da63638"} err="failed to get container status \"4656280acd2050da8be54436494d228ab0623c3030898b19374374da4da63638\": rpc error: code = NotFound desc = could not find container \"4656280acd2050da8be54436494d228ab0623c3030898b19374374da4da63638\": container with ID starting with 4656280acd2050da8be54436494d228ab0623c3030898b19374374da4da63638 not found: ID does not exist" Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.159199 4783 scope.go:117] "RemoveContainer" containerID="f61beb57cab7fb700298e481f4257de9548d5b4b5dc94649335afba1adab20ab" Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.159727 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f61beb57cab7fb700298e481f4257de9548d5b4b5dc94649335afba1adab20ab"} err="failed to get container status \"f61beb57cab7fb700298e481f4257de9548d5b4b5dc94649335afba1adab20ab\": rpc error: code = NotFound desc = could not find container \"f61beb57cab7fb700298e481f4257de9548d5b4b5dc94649335afba1adab20ab\": container with ID starting with f61beb57cab7fb700298e481f4257de9548d5b4b5dc94649335afba1adab20ab not found: ID does not exist" Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.159834 4783 scope.go:117] "RemoveContainer" containerID="4656280acd2050da8be54436494d228ab0623c3030898b19374374da4da63638" Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.160219 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4656280acd2050da8be54436494d228ab0623c3030898b19374374da4da63638"} err="failed to get container status \"4656280acd2050da8be54436494d228ab0623c3030898b19374374da4da63638\": rpc error: code = NotFound desc = could not find container \"4656280acd2050da8be54436494d228ab0623c3030898b19374374da4da63638\": container with ID starting with 4656280acd2050da8be54436494d228ab0623c3030898b19374374da4da63638 not found: ID does not exist" Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.170622 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c2035699-b1a9-4b70-9186-5981a069d8a9-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "c2035699-b1a9-4b70-9186-5981a069d8a9" (UID: "c2035699-b1a9-4b70-9186-5981a069d8a9"). InnerVolumeSpecName "nova-metadata-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.179133 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"] Oct 02 11:18:20 crc kubenswrapper[4783]: E1002 11:18:20.179610 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2035699-b1a9-4b70-9186-5981a069d8a9" containerName="nova-metadata-metadata" Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.179626 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2035699-b1a9-4b70-9186-5981a069d8a9" containerName="nova-metadata-metadata" Oct 02 11:18:20 crc kubenswrapper[4783]: E1002 11:18:20.179649 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="757f9906-d7ab-4ae7-a2f2-5b83bf48b820" containerName="nova-cell1-conductor-db-sync" Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.179656 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="757f9906-d7ab-4ae7-a2f2-5b83bf48b820" containerName="nova-cell1-conductor-db-sync" Oct 02 11:18:20 crc kubenswrapper[4783]: E1002 11:18:20.179668 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a1d4a6fe-7b83-4b7a-8b8f-93006697d03a" containerName="nova-manage" Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.179676 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="a1d4a6fe-7b83-4b7a-8b8f-93006697d03a" containerName="nova-manage" Oct 02 11:18:20 crc kubenswrapper[4783]: E1002 11:18:20.179738 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c2035699-b1a9-4b70-9186-5981a069d8a9" containerName="nova-metadata-log" Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.179746 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="c2035699-b1a9-4b70-9186-5981a069d8a9" containerName="nova-metadata-log" Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.179970 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2035699-b1a9-4b70-9186-5981a069d8a9" containerName="nova-metadata-metadata" Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.179996 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="a1d4a6fe-7b83-4b7a-8b8f-93006697d03a" containerName="nova-manage" Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.180010 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="c2035699-b1a9-4b70-9186-5981a069d8a9" containerName="nova-metadata-log" Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.180021 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="757f9906-d7ab-4ae7-a2f2-5b83bf48b820" containerName="nova-cell1-conductor-db-sync" Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.180666 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.184989 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.191220 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.247573 4783 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/c2035699-b1a9-4b70-9186-5981a069d8a9-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.349065 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ac6d24ee-eef2-426e-a186-b24150c6e1e9-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"ac6d24ee-eef2-426e-a186-b24150c6e1e9\") " pod="openstack/nova-cell1-conductor-0" Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.349192 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-58kd8\" (UniqueName: \"kubernetes.io/projected/ac6d24ee-eef2-426e-a186-b24150c6e1e9-kube-api-access-58kd8\") pod \"nova-cell1-conductor-0\" (UID: \"ac6d24ee-eef2-426e-a186-b24150c6e1e9\") " pod="openstack/nova-cell1-conductor-0" Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.349254 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac6d24ee-eef2-426e-a186-b24150c6e1e9-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"ac6d24ee-eef2-426e-a186-b24150c6e1e9\") " pod="openstack/nova-cell1-conductor-0" Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.450355 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac6d24ee-eef2-426e-a186-b24150c6e1e9-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"ac6d24ee-eef2-426e-a186-b24150c6e1e9\") " pod="openstack/nova-cell1-conductor-0" Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.450413 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ac6d24ee-eef2-426e-a186-b24150c6e1e9-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"ac6d24ee-eef2-426e-a186-b24150c6e1e9\") " pod="openstack/nova-cell1-conductor-0" Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.450531 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-58kd8\" (UniqueName: \"kubernetes.io/projected/ac6d24ee-eef2-426e-a186-b24150c6e1e9-kube-api-access-58kd8\") pod \"nova-cell1-conductor-0\" (UID: \"ac6d24ee-eef2-426e-a186-b24150c6e1e9\") " pod="openstack/nova-cell1-conductor-0" Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.454376 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.455411 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac6d24ee-eef2-426e-a186-b24150c6e1e9-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"ac6d24ee-eef2-426e-a186-b24150c6e1e9\") " pod="openstack/nova-cell1-conductor-0" Oct 02 
11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.457357 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ac6d24ee-eef2-426e-a186-b24150c6e1e9-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"ac6d24ee-eef2-426e-a186-b24150c6e1e9\") " pod="openstack/nova-cell1-conductor-0"
Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.478138 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-58kd8\" (UniqueName: \"kubernetes.io/projected/ac6d24ee-eef2-426e-a186-b24150c6e1e9-kube-api-access-58kd8\") pod \"nova-cell1-conductor-0\" (UID: \"ac6d24ee-eef2-426e-a186-b24150c6e1e9\") " pod="openstack/nova-cell1-conductor-0"
Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.481171 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"]
Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.496445 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"]
Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.496894 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.498057 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.500276 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.500981 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc"
Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.510495 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.652640 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1cb28c52-08fa-4a54-86d0-445e9c7706a9-config-data\") pod \"nova-metadata-0\" (UID: \"1cb28c52-08fa-4a54-86d0-445e9c7706a9\") " pod="openstack/nova-metadata-0"
Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.653030 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1cb28c52-08fa-4a54-86d0-445e9c7706a9-logs\") pod \"nova-metadata-0\" (UID: \"1cb28c52-08fa-4a54-86d0-445e9c7706a9\") " pod="openstack/nova-metadata-0"
Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.653053 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1cb28c52-08fa-4a54-86d0-445e9c7706a9-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"1cb28c52-08fa-4a54-86d0-445e9c7706a9\") " pod="openstack/nova-metadata-0"
Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.653114 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v8j6f\" (UniqueName: \"kubernetes.io/projected/1cb28c52-08fa-4a54-86d0-445e9c7706a9-kube-api-access-v8j6f\") pod \"nova-metadata-0\" (UID: \"1cb28c52-08fa-4a54-86d0-445e9c7706a9\") " pod="openstack/nova-metadata-0"
Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.653185 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/1cb28c52-08fa-4a54-86d0-445e9c7706a9-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"1cb28c52-08fa-4a54-86d0-445e9c7706a9\") " pod="openstack/nova-metadata-0"
Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.754498 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v8j6f\" (UniqueName: \"kubernetes.io/projected/1cb28c52-08fa-4a54-86d0-445e9c7706a9-kube-api-access-v8j6f\") pod \"nova-metadata-0\" (UID: \"1cb28c52-08fa-4a54-86d0-445e9c7706a9\") " pod="openstack/nova-metadata-0"
Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.754578 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/1cb28c52-08fa-4a54-86d0-445e9c7706a9-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"1cb28c52-08fa-4a54-86d0-445e9c7706a9\") " pod="openstack/nova-metadata-0"
Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.754657 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1cb28c52-08fa-4a54-86d0-445e9c7706a9-config-data\") pod \"nova-metadata-0\" (UID: \"1cb28c52-08fa-4a54-86d0-445e9c7706a9\") " pod="openstack/nova-metadata-0"
Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.754713 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1cb28c52-08fa-4a54-86d0-445e9c7706a9-logs\") pod \"nova-metadata-0\" (UID: \"1cb28c52-08fa-4a54-86d0-445e9c7706a9\") " pod="openstack/nova-metadata-0"
Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.754735 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1cb28c52-08fa-4a54-86d0-445e9c7706a9-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"1cb28c52-08fa-4a54-86d0-445e9c7706a9\") " pod="openstack/nova-metadata-0"
Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.755232 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1cb28c52-08fa-4a54-86d0-445e9c7706a9-logs\") pod \"nova-metadata-0\" (UID: \"1cb28c52-08fa-4a54-86d0-445e9c7706a9\") " pod="openstack/nova-metadata-0"
Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.758311 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/1cb28c52-08fa-4a54-86d0-445e9c7706a9-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"1cb28c52-08fa-4a54-86d0-445e9c7706a9\") " pod="openstack/nova-metadata-0"
Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.759275 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1cb28c52-08fa-4a54-86d0-445e9c7706a9-config-data\") pod \"nova-metadata-0\" (UID: \"1cb28c52-08fa-4a54-86d0-445e9c7706a9\") " pod="openstack/nova-metadata-0"
Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.759713 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1cb28c52-08fa-4a54-86d0-445e9c7706a9-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"1cb28c52-08fa-4a54-86d0-445e9c7706a9\") " pod="openstack/nova-metadata-0"
Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.793221 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v8j6f\" (UniqueName: \"kubernetes.io/projected/1cb28c52-08fa-4a54-86d0-445e9c7706a9-kube-api-access-v8j6f\") pod \"nova-metadata-0\" (UID: \"1cb28c52-08fa-4a54-86d0-445e9c7706a9\") " pod="openstack/nova-metadata-0"
Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.901505 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Oct 02 11:18:20 crc kubenswrapper[4783]: I1002 11:18:20.996698 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Oct 02 11:18:21 crc kubenswrapper[4783]: I1002 11:18:21.114612 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"ac6d24ee-eef2-426e-a186-b24150c6e1e9","Type":"ContainerStarted","Data":"84f87bcd4085503726f193764c69b8d4be76b9ddda1d3bd0cdec80a7880da9e7"}
Oct 02 11:18:21 crc kubenswrapper[4783]: E1002 11:18:21.299934 4783 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod8abc2e1e_de94_4880_8a75_0c7ee0a2cdba.slice/crio-conmon-ab15a3cd7992b45c329572700c99718692ab18197b5fb99e472dd12d4fcae8ec.scope\": RecentStats: unable to find data in memory cache]"
Oct 02 11:18:21 crc kubenswrapper[4783]: E1002 11:18:21.306399 4783 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4c90ab43907f5581dba9a3f080c210f1d8c2368261fb554155a0ffa7a8e80c1b" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Oct 02 11:18:21 crc kubenswrapper[4783]: E1002 11:18:21.311927 4783 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4c90ab43907f5581dba9a3f080c210f1d8c2368261fb554155a0ffa7a8e80c1b" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Oct 02 11:18:21 crc kubenswrapper[4783]: E1002 11:18:21.315520 4783 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="4c90ab43907f5581dba9a3f080c210f1d8c2368261fb554155a0ffa7a8e80c1b" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"]
Oct 02 11:18:21 crc kubenswrapper[4783]: E1002 11:18:21.315568 4783 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="6a18cba8-6810-488a-8538-ef42278f7162" containerName="nova-scheduler-scheduler"
Oct 02 11:18:21 crc kubenswrapper[4783]: I1002 11:18:21.428869 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Oct 02 11:18:21 crc kubenswrapper[4783]: I1002 11:18:21.513181 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Oct 02 11:18:21 crc kubenswrapper[4783]: I1002 11:18:21.513253 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 02 11:18:21 crc kubenswrapper[4783]: I1002 11:18:21.559471 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c2035699-b1a9-4b70-9186-5981a069d8a9" path="/var/lib/kubelet/pods/c2035699-b1a9-4b70-9186-5981a069d8a9/volumes"
Oct 02 11:18:22 crc kubenswrapper[4783]: I1002 11:18:22.026712 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-xhh9j"]
Oct 02 11:18:22 crc kubenswrapper[4783]: I1002 11:18:22.029070 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xhh9j"
Oct 02 11:18:22 crc kubenswrapper[4783]: I1002 11:18:22.057278 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xhh9j"]
Oct 02 11:18:22 crc kubenswrapper[4783]: I1002 11:18:22.145672 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1cb28c52-08fa-4a54-86d0-445e9c7706a9","Type":"ContainerStarted","Data":"4fb99850734680dca4718f69a9896f8317cc418a96c23b87c0fc5054b9aef104"}
Oct 02 11:18:22 crc kubenswrapper[4783]: I1002 11:18:22.145723 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1cb28c52-08fa-4a54-86d0-445e9c7706a9","Type":"ContainerStarted","Data":"4c661406f83ff442a7846871345be3ad3fbcf3383b4e5472164957914e6715bd"}
Oct 02 11:18:22 crc kubenswrapper[4783]: I1002 11:18:22.145738 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1cb28c52-08fa-4a54-86d0-445e9c7706a9","Type":"ContainerStarted","Data":"7d7e126b21400e498017914ee79959c02ec568e58a4d0e6eefde889508a702ef"}
Oct 02 11:18:22 crc kubenswrapper[4783]: I1002 11:18:22.151640 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"ac6d24ee-eef2-426e-a186-b24150c6e1e9","Type":"ContainerStarted","Data":"f49ce4c654ee77214284e0883ee606167fa9e510ca6b162f5fc286d0330f4eff"}
Oct 02 11:18:22 crc kubenswrapper[4783]: I1002 11:18:22.152543 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0"
Oct 02 11:18:22 crc kubenswrapper[4783]: I1002 11:18:22.164595 4783 generic.go:334] "Generic (PLEG): container finished" podID="8abc2e1e-de94-4880-8a75-0c7ee0a2cdba" containerID="ab15a3cd7992b45c329572700c99718692ab18197b5fb99e472dd12d4fcae8ec" exitCode=0
Oct 02 11:18:22 crc kubenswrapper[4783]: I1002 11:18:22.164643 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5fcdf587dd-wvthh" event={"ID":"8abc2e1e-de94-4880-8a75-0c7ee0a2cdba","Type":"ContainerDied","Data":"ab15a3cd7992b45c329572700c99718692ab18197b5fb99e472dd12d4fcae8ec"}
Oct 02 11:18:22 crc kubenswrapper[4783]: I1002 11:18:22.164676 4783 scope.go:117] "RemoveContainer" containerID="c82809b9c6eba0a8170866eb1d26bf56ad2341d1b01f79a18fbfd51af89b92ce"
Oct 02 11:18:22 crc kubenswrapper[4783]: I1002 11:18:22.183449 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.183405051 podStartE2EDuration="2.183405051s" podCreationTimestamp="2025-10-02 11:18:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:18:22.18154646 +0000 UTC m=+1535.497740731" watchObservedRunningTime="2025-10-02 11:18:22.183405051 +0000 UTC m=+1535.499599312"
Oct 02 11:18:22 crc kubenswrapper[4783]: I1002 11:18:22.194194 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f8cc2cdc-b576-402e-871a-4e8f6a604c62-catalog-content\") pod \"community-operators-xhh9j\" (UID: \"f8cc2cdc-b576-402e-871a-4e8f6a604c62\") " pod="openshift-marketplace/community-operators-xhh9j"
Oct 02 11:18:22 crc kubenswrapper[4783]: I1002 11:18:22.194267 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m9wtl\" (UniqueName: \"kubernetes.io/projected/f8cc2cdc-b576-402e-871a-4e8f6a604c62-kube-api-access-m9wtl\") pod \"community-operators-xhh9j\" (UID: \"f8cc2cdc-b576-402e-871a-4e8f6a604c62\") " pod="openshift-marketplace/community-operators-xhh9j"
Oct 02 11:18:22 crc kubenswrapper[4783]: I1002 11:18:22.194374 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f8cc2cdc-b576-402e-871a-4e8f6a604c62-utilities\") pod \"community-operators-xhh9j\" (UID: \"f8cc2cdc-b576-402e-871a-4e8f6a604c62\") " pod="openshift-marketplace/community-operators-xhh9j"
Oct 02 11:18:22 crc kubenswrapper[4783]: I1002 11:18:22.215236 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.215221322 podStartE2EDuration="2.215221322s" podCreationTimestamp="2025-10-02 11:18:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:18:22.21151794 +0000 UTC m=+1535.527712201" watchObservedRunningTime="2025-10-02 11:18:22.215221322 +0000 UTC m=+1535.531415583"
Oct 02 11:18:22 crc kubenswrapper[4783]: I1002 11:18:22.295817 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f8cc2cdc-b576-402e-871a-4e8f6a604c62-catalog-content\") pod \"community-operators-xhh9j\" (UID: \"f8cc2cdc-b576-402e-871a-4e8f6a604c62\") " pod="openshift-marketplace/community-operators-xhh9j"
Oct 02 11:18:22 crc kubenswrapper[4783]: I1002 11:18:22.295883 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m9wtl\" (UniqueName: \"kubernetes.io/projected/f8cc2cdc-b576-402e-871a-4e8f6a604c62-kube-api-access-m9wtl\") pod \"community-operators-xhh9j\" (UID: \"f8cc2cdc-b576-402e-871a-4e8f6a604c62\") " pod="openshift-marketplace/community-operators-xhh9j"
Oct 02 11:18:22 crc kubenswrapper[4783]: I1002 11:18:22.295964 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f8cc2cdc-b576-402e-871a-4e8f6a604c62-utilities\") pod \"community-operators-xhh9j\" (UID: \"f8cc2cdc-b576-402e-871a-4e8f6a604c62\") " pod="openshift-marketplace/community-operators-xhh9j"
Oct 02 11:18:22 crc kubenswrapper[4783]: I1002 11:18:22.297182 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f8cc2cdc-b576-402e-871a-4e8f6a604c62-catalog-content\") pod \"community-operators-xhh9j\" (UID: \"f8cc2cdc-b576-402e-871a-4e8f6a604c62\") " pod="openshift-marketplace/community-operators-xhh9j"
Oct 02 11:18:22 crc kubenswrapper[4783]: I1002 11:18:22.297915 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f8cc2cdc-b576-402e-871a-4e8f6a604c62-utilities\") pod \"community-operators-xhh9j\" (UID: \"f8cc2cdc-b576-402e-871a-4e8f6a604c62\") " pod="openshift-marketplace/community-operators-xhh9j"
Oct 02 11:18:22 crc kubenswrapper[4783]: I1002 11:18:22.316697 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m9wtl\" (UniqueName: \"kubernetes.io/projected/f8cc2cdc-b576-402e-871a-4e8f6a604c62-kube-api-access-m9wtl\") pod \"community-operators-xhh9j\" (UID: \"f8cc2cdc-b576-402e-871a-4e8f6a604c62\") " pod="openshift-marketplace/community-operators-xhh9j"
Oct 02 11:18:22 crc kubenswrapper[4783]: I1002 11:18:22.352337 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xhh9j"
Oct 02 11:18:22 crc kubenswrapper[4783]: I1002 11:18:22.854912 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-xhh9j"]
Oct 02 11:18:22 crc kubenswrapper[4783]: I1002 11:18:22.981295 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-5fcdf587dd-wvthh" podUID="8abc2e1e-de94-4880-8a75-0c7ee0a2cdba" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.143:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.143:8443: connect: connection refused"
Oct 02 11:18:23 crc kubenswrapper[4783]: I1002 11:18:23.066024 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Oct 02 11:18:23 crc kubenswrapper[4783]: I1002 11:18:23.179624 4783 generic.go:334] "Generic (PLEG): container finished" podID="f8cc2cdc-b576-402e-871a-4e8f6a604c62" containerID="8d6baeb6e4d6b11f4239bd0b89b80588530a7473d78fd82cb3ae715b2e52b3a1" exitCode=0
Oct 02 11:18:23 crc kubenswrapper[4783]: I1002 11:18:23.179689 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xhh9j" event={"ID":"f8cc2cdc-b576-402e-871a-4e8f6a604c62","Type":"ContainerDied","Data":"8d6baeb6e4d6b11f4239bd0b89b80588530a7473d78fd82cb3ae715b2e52b3a1"}
Oct 02 11:18:23 crc kubenswrapper[4783]: I1002 11:18:23.179717 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xhh9j" event={"ID":"f8cc2cdc-b576-402e-871a-4e8f6a604c62","Type":"ContainerStarted","Data":"c8a655c5a94837cc68d33f3831bdc6149023f75c17b24788489a78b48a868ec8"}
Oct 02 11:18:23 crc kubenswrapper[4783]: I1002 11:18:23.187550 4783 generic.go:334] "Generic (PLEG): container finished" podID="181e715a-cbe0-49a9-99af-670ff225dcf2" containerID="6ab0a13eba933032b7ec0af9928652eababe9e3493eb6678a6bb592c39537e09" exitCode=0
Oct 02 11:18:23 crc kubenswrapper[4783]: I1002 11:18:23.187645 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"181e715a-cbe0-49a9-99af-670ff225dcf2","Type":"ContainerDied","Data":"6ab0a13eba933032b7ec0af9928652eababe9e3493eb6678a6bb592c39537e09"}
Oct 02 11:18:23 crc kubenswrapper[4783]: I1002 11:18:23.187674 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"181e715a-cbe0-49a9-99af-670ff225dcf2","Type":"ContainerDied","Data":"508ae69d0381225c69783a6c3e70b96d9f06dcddf2d16fd7b93e4ff0795eab20"}
Oct 02 11:18:23 crc kubenswrapper[4783]: I1002 11:18:23.187690 4783 scope.go:117] "RemoveContainer" containerID="6ab0a13eba933032b7ec0af9928652eababe9e3493eb6678a6bb592c39537e09"
Oct 02 11:18:23 crc kubenswrapper[4783]: I1002 11:18:23.187711 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Oct 02 11:18:23 crc kubenswrapper[4783]: I1002 11:18:23.213912 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/181e715a-cbe0-49a9-99af-670ff225dcf2-config-data\") pod \"181e715a-cbe0-49a9-99af-670ff225dcf2\" (UID: \"181e715a-cbe0-49a9-99af-670ff225dcf2\") "
Oct 02 11:18:23 crc kubenswrapper[4783]: I1002 11:18:23.214225 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9fjnh\" (UniqueName: \"kubernetes.io/projected/181e715a-cbe0-49a9-99af-670ff225dcf2-kube-api-access-9fjnh\") pod \"181e715a-cbe0-49a9-99af-670ff225dcf2\" (UID: \"181e715a-cbe0-49a9-99af-670ff225dcf2\") "
Oct 02 11:18:23 crc kubenswrapper[4783]: I1002 11:18:23.214455 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/181e715a-cbe0-49a9-99af-670ff225dcf2-logs\") pod \"181e715a-cbe0-49a9-99af-670ff225dcf2\" (UID: \"181e715a-cbe0-49a9-99af-670ff225dcf2\") "
Oct 02 11:18:23 crc kubenswrapper[4783]: I1002 11:18:23.214951 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/181e715a-cbe0-49a9-99af-670ff225dcf2-logs" (OuterVolumeSpecName: "logs") pod "181e715a-cbe0-49a9-99af-670ff225dcf2" (UID: "181e715a-cbe0-49a9-99af-670ff225dcf2"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 02 11:18:23 crc kubenswrapper[4783]: I1002 11:18:23.215002 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/181e715a-cbe0-49a9-99af-670ff225dcf2-combined-ca-bundle\") pod \"181e715a-cbe0-49a9-99af-670ff225dcf2\" (UID: \"181e715a-cbe0-49a9-99af-670ff225dcf2\") "
Oct 02 11:18:23 crc kubenswrapper[4783]: I1002 11:18:23.215786 4783 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/181e715a-cbe0-49a9-99af-670ff225dcf2-logs\") on node \"crc\" DevicePath \"\""
Oct 02 11:18:23 crc kubenswrapper[4783]: I1002 11:18:23.224349 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/181e715a-cbe0-49a9-99af-670ff225dcf2-kube-api-access-9fjnh" (OuterVolumeSpecName: "kube-api-access-9fjnh") pod "181e715a-cbe0-49a9-99af-670ff225dcf2" (UID: "181e715a-cbe0-49a9-99af-670ff225dcf2"). InnerVolumeSpecName "kube-api-access-9fjnh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 02 11:18:23 crc kubenswrapper[4783]: I1002 11:18:23.229716 4783 scope.go:117] "RemoveContainer" containerID="1bcaba9ce89ba7a63373f5c39aa4836181668dc20e49eeec751bb0dfab952a10"
Oct 02 11:18:23 crc kubenswrapper[4783]: I1002 11:18:23.239910 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/181e715a-cbe0-49a9-99af-670ff225dcf2-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "181e715a-cbe0-49a9-99af-670ff225dcf2" (UID: "181e715a-cbe0-49a9-99af-670ff225dcf2"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:18:23 crc kubenswrapper[4783]: I1002 11:18:23.258223 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/181e715a-cbe0-49a9-99af-670ff225dcf2-config-data" (OuterVolumeSpecName: "config-data") pod "181e715a-cbe0-49a9-99af-670ff225dcf2" (UID: "181e715a-cbe0-49a9-99af-670ff225dcf2"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:18:23 crc kubenswrapper[4783]: I1002 11:18:23.258360 4783 scope.go:117] "RemoveContainer" containerID="6ab0a13eba933032b7ec0af9928652eababe9e3493eb6678a6bb592c39537e09"
Oct 02 11:18:23 crc kubenswrapper[4783]: E1002 11:18:23.258919 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6ab0a13eba933032b7ec0af9928652eababe9e3493eb6678a6bb592c39537e09\": container with ID starting with 6ab0a13eba933032b7ec0af9928652eababe9e3493eb6678a6bb592c39537e09 not found: ID does not exist" containerID="6ab0a13eba933032b7ec0af9928652eababe9e3493eb6678a6bb592c39537e09"
Oct 02 11:18:23 crc kubenswrapper[4783]: I1002 11:18:23.259242 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6ab0a13eba933032b7ec0af9928652eababe9e3493eb6678a6bb592c39537e09"} err="failed to get container status \"6ab0a13eba933032b7ec0af9928652eababe9e3493eb6678a6bb592c39537e09\": rpc error: code = NotFound desc = could not find container \"6ab0a13eba933032b7ec0af9928652eababe9e3493eb6678a6bb592c39537e09\": container with ID starting with 6ab0a13eba933032b7ec0af9928652eababe9e3493eb6678a6bb592c39537e09 not found: ID does not exist"
Oct 02 11:18:23 crc kubenswrapper[4783]: I1002 11:18:23.259281 4783 scope.go:117] "RemoveContainer" containerID="1bcaba9ce89ba7a63373f5c39aa4836181668dc20e49eeec751bb0dfab952a10"
Oct 02 11:18:23 crc kubenswrapper[4783]: E1002 11:18:23.260091 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1bcaba9ce89ba7a63373f5c39aa4836181668dc20e49eeec751bb0dfab952a10\": container with ID starting with 1bcaba9ce89ba7a63373f5c39aa4836181668dc20e49eeec751bb0dfab952a10 not found: ID does not exist" containerID="1bcaba9ce89ba7a63373f5c39aa4836181668dc20e49eeec751bb0dfab952a10"
Oct 02 11:18:23 crc kubenswrapper[4783]: I1002 11:18:23.260124 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1bcaba9ce89ba7a63373f5c39aa4836181668dc20e49eeec751bb0dfab952a10"} err="failed to get container status \"1bcaba9ce89ba7a63373f5c39aa4836181668dc20e49eeec751bb0dfab952a10\": rpc error: code = NotFound desc = could not find container \"1bcaba9ce89ba7a63373f5c39aa4836181668dc20e49eeec751bb0dfab952a10\": container with ID starting with 1bcaba9ce89ba7a63373f5c39aa4836181668dc20e49eeec751bb0dfab952a10 not found: ID does not exist"
Oct 02 11:18:23 crc kubenswrapper[4783]: I1002 11:18:23.317113 4783 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/181e715a-cbe0-49a9-99af-670ff225dcf2-config-data\") on node \"crc\" DevicePath \"\""
Oct 02 11:18:23 crc kubenswrapper[4783]: I1002 11:18:23.317147 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9fjnh\" (UniqueName: \"kubernetes.io/projected/181e715a-cbe0-49a9-99af-670ff225dcf2-kube-api-access-9fjnh\") on node \"crc\" DevicePath \"\""
Oct 02 11:18:23 crc kubenswrapper[4783]: I1002 11:18:23.317158 4783 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/181e715a-cbe0-49a9-99af-670ff225dcf2-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Oct 02 11:18:23 crc kubenswrapper[4783]: I1002 11:18:23.528929 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Oct 02 11:18:23 crc kubenswrapper[4783]: I1002 11:18:23.535660 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"]
Oct 02 11:18:23 crc kubenswrapper[4783]: I1002 11:18:23.564789 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="181e715a-cbe0-49a9-99af-670ff225dcf2" path="/var/lib/kubelet/pods/181e715a-cbe0-49a9-99af-670ff225dcf2/volumes"
Oct 02 11:18:23 crc kubenswrapper[4783]: I1002 11:18:23.565491 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Oct 02 11:18:23 crc kubenswrapper[4783]: E1002 11:18:23.565801 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="181e715a-cbe0-49a9-99af-670ff225dcf2" containerName="nova-api-api"
Oct 02 11:18:23 crc kubenswrapper[4783]: I1002 11:18:23.565819 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="181e715a-cbe0-49a9-99af-670ff225dcf2" containerName="nova-api-api"
Oct 02 11:18:23 crc kubenswrapper[4783]: E1002 11:18:23.565838 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="181e715a-cbe0-49a9-99af-670ff225dcf2" containerName="nova-api-log"
Oct 02 11:18:23 crc kubenswrapper[4783]: I1002 11:18:23.565845 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="181e715a-cbe0-49a9-99af-670ff225dcf2" containerName="nova-api-log"
Oct 02 11:18:23 crc kubenswrapper[4783]: I1002 11:18:23.565995 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="181e715a-cbe0-49a9-99af-670ff225dcf2" containerName="nova-api-log"
Oct 02 11:18:23 crc kubenswrapper[4783]: I1002 11:18:23.566010 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="181e715a-cbe0-49a9-99af-670ff225dcf2" containerName="nova-api-api"
Oct 02 11:18:23 crc kubenswrapper[4783]: I1002 11:18:23.567091 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Oct 02 11:18:23 crc kubenswrapper[4783]: I1002 11:18:23.567177 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Oct 02 11:18:23 crc kubenswrapper[4783]: I1002 11:18:23.569064 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Oct 02 11:18:23 crc kubenswrapper[4783]: I1002 11:18:23.730142 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d656e193-b275-4f25-a1f4-6be84d785a5c-config-data\") pod \"nova-api-0\" (UID: \"d656e193-b275-4f25-a1f4-6be84d785a5c\") " pod="openstack/nova-api-0"
Oct 02 11:18:23 crc kubenswrapper[4783]: I1002 11:18:23.730233 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d656e193-b275-4f25-a1f4-6be84d785a5c-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"d656e193-b275-4f25-a1f4-6be84d785a5c\") " pod="openstack/nova-api-0"
Oct 02 11:18:23 crc kubenswrapper[4783]: I1002 11:18:23.730295 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d656e193-b275-4f25-a1f4-6be84d785a5c-logs\") pod \"nova-api-0\" (UID: \"d656e193-b275-4f25-a1f4-6be84d785a5c\") " pod="openstack/nova-api-0"
Oct 02 11:18:23 crc kubenswrapper[4783]: I1002 11:18:23.730325 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ffp4j\" (UniqueName: \"kubernetes.io/projected/d656e193-b275-4f25-a1f4-6be84d785a5c-kube-api-access-ffp4j\") pod \"nova-api-0\" (UID: \"d656e193-b275-4f25-a1f4-6be84d785a5c\") " pod="openstack/nova-api-0"
Oct 02 11:18:23 crc kubenswrapper[4783]: I1002 11:18:23.832511 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d656e193-b275-4f25-a1f4-6be84d785a5c-config-data\") pod \"nova-api-0\" (UID: \"d656e193-b275-4f25-a1f4-6be84d785a5c\") " pod="openstack/nova-api-0"
Oct 02 11:18:23 crc kubenswrapper[4783]: I1002 11:18:23.832593 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d656e193-b275-4f25-a1f4-6be84d785a5c-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"d656e193-b275-4f25-a1f4-6be84d785a5c\") " pod="openstack/nova-api-0"
Oct 02 11:18:23 crc kubenswrapper[4783]: I1002 11:18:23.832655 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d656e193-b275-4f25-a1f4-6be84d785a5c-logs\") pod \"nova-api-0\" (UID: \"d656e193-b275-4f25-a1f4-6be84d785a5c\") " pod="openstack/nova-api-0"
Oct 02 11:18:23 crc kubenswrapper[4783]: I1002 11:18:23.832689 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ffp4j\" (UniqueName: \"kubernetes.io/projected/d656e193-b275-4f25-a1f4-6be84d785a5c-kube-api-access-ffp4j\") pod \"nova-api-0\" (UID: \"d656e193-b275-4f25-a1f4-6be84d785a5c\") " pod="openstack/nova-api-0"
Oct 02 11:18:23 crc kubenswrapper[4783]: I1002 11:18:23.833805 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d656e193-b275-4f25-a1f4-6be84d785a5c-logs\") pod \"nova-api-0\" (UID: \"d656e193-b275-4f25-a1f4-6be84d785a5c\") " pod="openstack/nova-api-0"
Oct 02 11:18:23 crc kubenswrapper[4783]: I1002 11:18:23.840314 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d656e193-b275-4f25-a1f4-6be84d785a5c-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"d656e193-b275-4f25-a1f4-6be84d785a5c\") " pod="openstack/nova-api-0"
Oct 02 11:18:23 crc kubenswrapper[4783]: I1002 11:18:23.842313 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d656e193-b275-4f25-a1f4-6be84d785a5c-config-data\") pod \"nova-api-0\" (UID: \"d656e193-b275-4f25-a1f4-6be84d785a5c\") " pod="openstack/nova-api-0"
Oct 02 11:18:23 crc kubenswrapper[4783]: I1002 11:18:23.860943 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ffp4j\" (UniqueName: \"kubernetes.io/projected/d656e193-b275-4f25-a1f4-6be84d785a5c-kube-api-access-ffp4j\") pod \"nova-api-0\" (UID: \"d656e193-b275-4f25-a1f4-6be84d785a5c\") " pod="openstack/nova-api-0"
Oct 02 11:18:23 crc kubenswrapper[4783]: I1002 11:18:23.892649 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Oct 02 11:18:24 crc kubenswrapper[4783]: I1002 11:18:24.397351 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Oct 02 11:18:24 crc kubenswrapper[4783]: I1002 11:18:24.985213 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Oct 02 11:18:25 crc kubenswrapper[4783]: I1002 11:18:25.159207 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6a18cba8-6810-488a-8538-ef42278f7162-config-data\") pod \"6a18cba8-6810-488a-8538-ef42278f7162\" (UID: \"6a18cba8-6810-488a-8538-ef42278f7162\") "
Oct 02 11:18:25 crc kubenswrapper[4783]: I1002 11:18:25.159304 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a18cba8-6810-488a-8538-ef42278f7162-combined-ca-bundle\") pod \"6a18cba8-6810-488a-8538-ef42278f7162\" (UID: \"6a18cba8-6810-488a-8538-ef42278f7162\") "
Oct 02 11:18:25 crc kubenswrapper[4783]: I1002 11:18:25.159347 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xlkqb\" (UniqueName: \"kubernetes.io/projected/6a18cba8-6810-488a-8538-ef42278f7162-kube-api-access-xlkqb\") pod \"6a18cba8-6810-488a-8538-ef42278f7162\" (UID: \"6a18cba8-6810-488a-8538-ef42278f7162\") "
Oct 02 11:18:25 crc kubenswrapper[4783]: I1002 11:18:25.164236 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6a18cba8-6810-488a-8538-ef42278f7162-kube-api-access-xlkqb" (OuterVolumeSpecName: "kube-api-access-xlkqb") pod "6a18cba8-6810-488a-8538-ef42278f7162" (UID: "6a18cba8-6810-488a-8538-ef42278f7162"). InnerVolumeSpecName "kube-api-access-xlkqb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 02 11:18:25 crc kubenswrapper[4783]: I1002 11:18:25.200820 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a18cba8-6810-488a-8538-ef42278f7162-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6a18cba8-6810-488a-8538-ef42278f7162" (UID: "6a18cba8-6810-488a-8538-ef42278f7162"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:18:25 crc kubenswrapper[4783]: I1002 11:18:25.203735 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6a18cba8-6810-488a-8538-ef42278f7162-config-data" (OuterVolumeSpecName: "config-data") pod "6a18cba8-6810-488a-8538-ef42278f7162" (UID: "6a18cba8-6810-488a-8538-ef42278f7162"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:18:25 crc kubenswrapper[4783]: I1002 11:18:25.222791 4783 generic.go:334] "Generic (PLEG): container finished" podID="6a18cba8-6810-488a-8538-ef42278f7162" containerID="4c90ab43907f5581dba9a3f080c210f1d8c2368261fb554155a0ffa7a8e80c1b" exitCode=0
Oct 02 11:18:25 crc kubenswrapper[4783]: I1002 11:18:25.222830 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Oct 02 11:18:25 crc kubenswrapper[4783]: I1002 11:18:25.222848 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"6a18cba8-6810-488a-8538-ef42278f7162","Type":"ContainerDied","Data":"4c90ab43907f5581dba9a3f080c210f1d8c2368261fb554155a0ffa7a8e80c1b"}
Oct 02 11:18:25 crc kubenswrapper[4783]: I1002 11:18:25.225855 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"6a18cba8-6810-488a-8538-ef42278f7162","Type":"ContainerDied","Data":"43e38f40bc1f36cc4d86a8ca0706117712acf02e4f3b3f2c61fa095e5d453617"}
Oct 02 11:18:25 crc kubenswrapper[4783]: I1002 11:18:25.225887 4783 scope.go:117] "RemoveContainer" containerID="4c90ab43907f5581dba9a3f080c210f1d8c2368261fb554155a0ffa7a8e80c1b"
Oct 02 11:18:25 crc kubenswrapper[4783]: I1002 11:18:25.234565 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xhh9j" event={"ID":"f8cc2cdc-b576-402e-871a-4e8f6a604c62","Type":"ContainerStarted","Data":"76d94d56f935e177ac878c73e28e423d89111cfd8de616aae2f587cf5ec8bc4f"}
Oct 02 11:18:25 crc kubenswrapper[4783]: I1002 11:18:25.238217 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d656e193-b275-4f25-a1f4-6be84d785a5c","Type":"ContainerStarted","Data":"d836319d22643605a08a9f9c365f41d180e0d87c53345fe5f12fe78838fc454d"}
Oct 02 11:18:25 crc kubenswrapper[4783]: I1002 11:18:25.238603 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d656e193-b275-4f25-a1f4-6be84d785a5c","Type":"ContainerStarted","Data":"5f3604143df308eaf6f606d2c580280f1e3aa130a25603104e90eace90b8e9df"}
Oct 02 11:18:25 crc kubenswrapper[4783]: I1002 11:18:25.238728 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d656e193-b275-4f25-a1f4-6be84d785a5c","Type":"ContainerStarted","Data":"6e2471580f45314cddad5d3284220a15eccae40d5f814688ea755610b54e11a0"}
Oct 02 11:18:25 crc kubenswrapper[4783]: I1002 11:18:25.261352 4783 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6a18cba8-6810-488a-8538-ef42278f7162-config-data\") on node \"crc\" DevicePath \"\""
Oct 02 11:18:25 crc kubenswrapper[4783]: I1002 11:18:25.261398 4783 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6a18cba8-6810-488a-8538-ef42278f7162-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Oct 02 11:18:25 crc kubenswrapper[4783]: I1002 11:18:25.261429 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xlkqb\" (UniqueName: \"kubernetes.io/projected/6a18cba8-6810-488a-8538-ef42278f7162-kube-api-access-xlkqb\") on node \"crc\" DevicePath \"\""
Oct 02 11:18:25 crc kubenswrapper[4783]: I1002 11:18:25.271008 4783 scope.go:117] "RemoveContainer" containerID="4c90ab43907f5581dba9a3f080c210f1d8c2368261fb554155a0ffa7a8e80c1b"
Oct 02 11:18:25 crc kubenswrapper[4783]: E1002 11:18:25.272323 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4c90ab43907f5581dba9a3f080c210f1d8c2368261fb554155a0ffa7a8e80c1b\": container with ID starting with 4c90ab43907f5581dba9a3f080c210f1d8c2368261fb554155a0ffa7a8e80c1b not found: ID does not exist" containerID="4c90ab43907f5581dba9a3f080c210f1d8c2368261fb554155a0ffa7a8e80c1b"
Oct 02 11:18:25 crc kubenswrapper[4783]: I1002 11:18:25.272375 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c90ab43907f5581dba9a3f080c210f1d8c2368261fb554155a0ffa7a8e80c1b"} err="failed to get container status \"4c90ab43907f5581dba9a3f080c210f1d8c2368261fb554155a0ffa7a8e80c1b\": rpc error: code = NotFound desc = could not find container \"4c90ab43907f5581dba9a3f080c210f1d8c2368261fb554155a0ffa7a8e80c1b\": container with ID starting with 4c90ab43907f5581dba9a3f080c210f1d8c2368261fb554155a0ffa7a8e80c1b not found: ID does not exist"
Oct 02 11:18:25 crc kubenswrapper[4783]: I1002 11:18:25.282612 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Oct 02 11:18:25 crc kubenswrapper[4783]: I1002 11:18:25.301015 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"]
Oct 02 11:18:25 crc kubenswrapper[4783]: I1002 11:18:25.310504 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"]
Oct 02 11:18:25 crc kubenswrapper[4783]: E1002 11:18:25.310912 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6a18cba8-6810-488a-8538-ef42278f7162" containerName="nova-scheduler-scheduler"
Oct 02 11:18:25 crc kubenswrapper[4783]: I1002 11:18:25.310929 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="6a18cba8-6810-488a-8538-ef42278f7162" containerName="nova-scheduler-scheduler"
Oct 02 11:18:25 crc kubenswrapper[4783]: I1002 11:18:25.311100 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="6a18cba8-6810-488a-8538-ef42278f7162" containerName="nova-scheduler-scheduler"
Oct 02 11:18:25 crc kubenswrapper[4783]: I1002 11:18:25.311719 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Oct 02 11:18:25 crc kubenswrapper[4783]: I1002 11:18:25.313211 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.313155022 podStartE2EDuration="2.313155022s" podCreationTimestamp="2025-10-02 11:18:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:18:25.292091916 +0000 UTC m=+1538.608286177" watchObservedRunningTime="2025-10-02 11:18:25.313155022 +0000 UTC m=+1538.629349283"
Oct 02 11:18:25 crc kubenswrapper[4783]: I1002 11:18:25.314531 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data"
Oct 02 11:18:25 crc kubenswrapper[4783]: I1002 11:18:25.338485 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Oct 02 11:18:25 crc kubenswrapper[4783]: I1002 11:18:25.464478 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x44zd\" (UniqueName: \"kubernetes.io/projected/e89f85ef-910d-4b8f-8273-a59e2435f4e4-kube-api-access-x44zd\") pod \"nova-scheduler-0\" (UID: \"e89f85ef-910d-4b8f-8273-a59e2435f4e4\") " pod="openstack/nova-scheduler-0"
Oct 02 11:18:25 crc kubenswrapper[4783]: I1002 11:18:25.464566 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e89f85ef-910d-4b8f-8273-a59e2435f4e4-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"e89f85ef-910d-4b8f-8273-a59e2435f4e4\") " pod="openstack/nova-scheduler-0"
Oct 02 11:18:25 crc kubenswrapper[4783]: I1002 11:18:25.464727 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e89f85ef-910d-4b8f-8273-a59e2435f4e4-config-data\") pod \"nova-scheduler-0\" (UID: \"e89f85ef-910d-4b8f-8273-a59e2435f4e4\") " pod="openstack/nova-scheduler-0"
Oct 02 11:18:25 crc kubenswrapper[4783]: I1002 11:18:25.556379 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6a18cba8-6810-488a-8538-ef42278f7162" path="/var/lib/kubelet/pods/6a18cba8-6810-488a-8538-ef42278f7162/volumes"
Oct 02 11:18:25 crc kubenswrapper[4783]: I1002 11:18:25.566594 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e89f85ef-910d-4b8f-8273-a59e2435f4e4-config-data\") pod \"nova-scheduler-0\" (UID: \"e89f85ef-910d-4b8f-8273-a59e2435f4e4\") " pod="openstack/nova-scheduler-0"
Oct 02 11:18:25 crc kubenswrapper[4783]: I1002 11:18:25.566684 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x44zd\" (UniqueName: \"kubernetes.io/projected/e89f85ef-910d-4b8f-8273-a59e2435f4e4-kube-api-access-x44zd\") pod \"nova-scheduler-0\" (UID: \"e89f85ef-910d-4b8f-8273-a59e2435f4e4\") " pod="openstack/nova-scheduler-0"
Oct 02 11:18:25 crc kubenswrapper[4783]: I1002 11:18:25.566722 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e89f85ef-910d-4b8f-8273-a59e2435f4e4-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"e89f85ef-910d-4b8f-8273-a59e2435f4e4\") " pod="openstack/nova-scheduler-0"
Oct 02 11:18:25 crc kubenswrapper[4783]: I1002 11:18:25.570903 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e89f85ef-910d-4b8f-8273-a59e2435f4e4-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"e89f85ef-910d-4b8f-8273-a59e2435f4e4\") " pod="openstack/nova-scheduler-0"
Oct 02 11:18:25 crc kubenswrapper[4783]: I1002 11:18:25.571351 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e89f85ef-910d-4b8f-8273-a59e2435f4e4-config-data\") pod \"nova-scheduler-0\" (UID: \"e89f85ef-910d-4b8f-8273-a59e2435f4e4\") " pod="openstack/nova-scheduler-0"
Oct 02 11:18:25 crc kubenswrapper[4783]: I1002 11:18:25.583792 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x44zd\" (UniqueName: \"kubernetes.io/projected/e89f85ef-910d-4b8f-8273-a59e2435f4e4-kube-api-access-x44zd\") pod \"nova-scheduler-0\" (UID: \"e89f85ef-910d-4b8f-8273-a59e2435f4e4\") " pod="openstack/nova-scheduler-0"
Oct 02 11:18:25 crc kubenswrapper[4783]: I1002 11:18:25.587770 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-tjmzw"
Oct 02 11:18:25 crc kubenswrapper[4783]: I1002 11:18:25.587828 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-tjmzw"
Oct 02 11:18:25 crc kubenswrapper[4783]: I1002 11:18:25.632716 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Oct 02 11:18:25 crc kubenswrapper[4783]: I1002 11:18:25.642076 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-tjmzw"
Oct 02 11:18:26 crc kubenswrapper[4783]: I1002 11:18:25.902018 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Oct 02 11:18:26 crc kubenswrapper[4783]: I1002 11:18:25.902339 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0"
Oct 02 11:18:26 crc kubenswrapper[4783]: I1002 11:18:26.093474 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Oct 02 11:18:26 crc kubenswrapper[4783]: I1002 11:18:26.251301 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"e89f85ef-910d-4b8f-8273-a59e2435f4e4","Type":"ContainerStarted","Data":"c3436badf36167939caf2fe4db13b45f47c46d7372aebf14c4dc5fdf942abeb0"}
Oct 02 11:18:26 crc kubenswrapper[4783]: I1002 11:18:26.259454 4783 generic.go:334] "Generic (PLEG): container finished" podID="f8cc2cdc-b576-402e-871a-4e8f6a604c62" containerID="76d94d56f935e177ac878c73e28e423d89111cfd8de616aae2f587cf5ec8bc4f" exitCode=0
Oct 02 11:18:26 crc kubenswrapper[4783]: I1002 11:18:26.259514 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xhh9j" event={"ID":"f8cc2cdc-b576-402e-871a-4e8f6a604c62","Type":"ContainerDied","Data":"76d94d56f935e177ac878c73e28e423d89111cfd8de616aae2f587cf5ec8bc4f"}
Oct 02 11:18:26 crc kubenswrapper[4783]: I1002 11:18:26.432057 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-tjmzw"
Oct 02 11:18:27 crc kubenswrapper[4783]: I1002 11:18:27.271171 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xhh9j" event={"ID":"f8cc2cdc-b576-402e-871a-4e8f6a604c62","Type":"ContainerStarted","Data":"0cd94e2fb61e7c56d9dd4a5e59d2987c63822cbe19d9520fb68d22dcf1b1e268"}
Oct 02 11:18:27 crc kubenswrapper[4783]: I1002 11:18:27.273760 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"e89f85ef-910d-4b8f-8273-a59e2435f4e4","Type":"ContainerStarted","Data":"a66e98cb977fe3c273191b6ae7df0ef9c8f55b9fe511fb0586957677b80a0884"}
Oct 02 11:18:27 crc kubenswrapper[4783]: I1002 11:18:27.305367 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-xhh9j" podStartSLOduration=1.7450997529999999 podStartE2EDuration="5.305342018s" podCreationTimestamp="2025-10-02 11:18:22 +0000 UTC" firstStartedPulling="2025-10-02 11:18:23.183626387 +0000 UTC m=+1536.499820648" lastFinishedPulling="2025-10-02 11:18:26.743868652 +0000 UTC m=+1540.060062913" observedRunningTime="2025-10-02 11:18:27.299821757 +0000 UTC m=+1540.616016018" watchObservedRunningTime="2025-10-02 11:18:27.305342018 +0000 UTC m=+1540.621536279"
Oct 02 11:18:27 crc kubenswrapper[4783]: I1002 11:18:27.612106 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.612087114 podStartE2EDuration="2.612087114s" podCreationTimestamp="2025-10-02 11:18:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:18:27.322428846 +0000 UTC m=+1540.638623107" watchObservedRunningTime="2025-10-02 11:18:27.612087114 +0000 UTC m=+1540.928281375"
Oct 02 11:18:27 crc kubenswrapper[4783]: I1002 11:18:27.613142 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-tjmzw"]
Oct 02 11:18:28 crc kubenswrapper[4783]: I1002 11:18:28.284899 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-tjmzw" podUID="106281fe-b164-4c5a-9597-34bc34658415" containerName="registry-server" containerID="cri-o://1c18e2a2457b5ef1f5b8cc4a6c176b836fb55158d1447eb35374c9e11c8e76e5" gracePeriod=2
Oct 02 11:18:28 crc kubenswrapper[4783]: I1002 11:18:28.773813 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tjmzw"
Oct 02 11:18:28 crc kubenswrapper[4783]: I1002 11:18:28.967816 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d5zn\" (UniqueName: \"kubernetes.io/projected/106281fe-b164-4c5a-9597-34bc34658415-kube-api-access-2d5zn\") pod \"106281fe-b164-4c5a-9597-34bc34658415\" (UID: \"106281fe-b164-4c5a-9597-34bc34658415\") "
Oct 02 11:18:28 crc kubenswrapper[4783]: I1002 11:18:28.967924 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/106281fe-b164-4c5a-9597-34bc34658415-catalog-content\") pod \"106281fe-b164-4c5a-9597-34bc34658415\" (UID: \"106281fe-b164-4c5a-9597-34bc34658415\") "
Oct 02 11:18:28 crc kubenswrapper[4783]: I1002 11:18:28.967974 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/106281fe-b164-4c5a-9597-34bc34658415-utilities\") pod \"106281fe-b164-4c5a-9597-34bc34658415\" (UID: \"106281fe-b164-4c5a-9597-34bc34658415\") "
Oct 02 11:18:28 crc kubenswrapper[4783]: I1002 11:18:28.968791 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/106281fe-b164-4c5a-9597-34bc34658415-utilities" (OuterVolumeSpecName: "utilities") pod "106281fe-b164-4c5a-9597-34bc34658415" (UID: "106281fe-b164-4c5a-9597-34bc34658415"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 02 11:18:28 crc kubenswrapper[4783]: I1002 11:18:28.969284 4783 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/106281fe-b164-4c5a-9597-34bc34658415-utilities\") on node \"crc\" DevicePath \"\""
Oct 02 11:18:28 crc kubenswrapper[4783]: I1002 11:18:28.987936 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/106281fe-b164-4c5a-9597-34bc34658415-kube-api-access-2d5zn" (OuterVolumeSpecName: "kube-api-access-2d5zn") pod "106281fe-b164-4c5a-9597-34bc34658415" (UID: "106281fe-b164-4c5a-9597-34bc34658415"). InnerVolumeSpecName "kube-api-access-2d5zn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 02 11:18:28 crc kubenswrapper[4783]: I1002 11:18:28.993824 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/106281fe-b164-4c5a-9597-34bc34658415-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "106281fe-b164-4c5a-9597-34bc34658415" (UID: "106281fe-b164-4c5a-9597-34bc34658415"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 02 11:18:29 crc kubenswrapper[4783]: I1002 11:18:29.070050 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d5zn\" (UniqueName: \"kubernetes.io/projected/106281fe-b164-4c5a-9597-34bc34658415-kube-api-access-2d5zn\") on node \"crc\" DevicePath \"\""
Oct 02 11:18:29 crc kubenswrapper[4783]: I1002 11:18:29.070083 4783 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/106281fe-b164-4c5a-9597-34bc34658415-catalog-content\") on node \"crc\" DevicePath \"\""
Oct 02 11:18:29 crc kubenswrapper[4783]: I1002 11:18:29.295532 4783 generic.go:334] "Generic (PLEG): container finished" podID="106281fe-b164-4c5a-9597-34bc34658415" containerID="1c18e2a2457b5ef1f5b8cc4a6c176b836fb55158d1447eb35374c9e11c8e76e5" exitCode=0
Oct 02 11:18:29 crc kubenswrapper[4783]: I1002 11:18:29.295583 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tjmzw" event={"ID":"106281fe-b164-4c5a-9597-34bc34658415","Type":"ContainerDied","Data":"1c18e2a2457b5ef1f5b8cc4a6c176b836fb55158d1447eb35374c9e11c8e76e5"}
Oct 02 11:18:29 crc kubenswrapper[4783]: I1002 11:18:29.295609 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-tjmzw" event={"ID":"106281fe-b164-4c5a-9597-34bc34658415","Type":"ContainerDied","Data":"650cae4827bb99e1451e6699ab722119abebf202b2e8160d4e972f5e049e6400"}
Oct 02 11:18:29 crc kubenswrapper[4783]: I1002 11:18:29.295607 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-tjmzw"
Oct 02 11:18:29 crc kubenswrapper[4783]: I1002 11:18:29.295625 4783 scope.go:117] "RemoveContainer" containerID="1c18e2a2457b5ef1f5b8cc4a6c176b836fb55158d1447eb35374c9e11c8e76e5"
Oct 02 11:18:29 crc kubenswrapper[4783]: I1002 11:18:29.316025 4783 scope.go:117] "RemoveContainer" containerID="e53c9d6f77589e4b8d257b15649cbb3369c1cc7039ae1ebe4d1955564cd131ab"
Oct 02 11:18:29 crc kubenswrapper[4783]: I1002 11:18:29.345815 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-tjmzw"]
Oct 02 11:18:29 crc kubenswrapper[4783]: I1002 11:18:29.355294 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-tjmzw"]
Oct 02 11:18:29 crc kubenswrapper[4783]: I1002 11:18:29.355709 4783 scope.go:117] "RemoveContainer" containerID="ee6f32ecb814597f3a52a21e13f0bc322491efd976e6c2138e04940db5272458"
Oct 02 11:18:29 crc kubenswrapper[4783]: I1002 11:18:29.391984 4783 scope.go:117] "RemoveContainer" containerID="1c18e2a2457b5ef1f5b8cc4a6c176b836fb55158d1447eb35374c9e11c8e76e5"
Oct 02 11:18:29 crc kubenswrapper[4783]: E1002 11:18:29.392392 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1c18e2a2457b5ef1f5b8cc4a6c176b836fb55158d1447eb35374c9e11c8e76e5\": container with ID starting with 1c18e2a2457b5ef1f5b8cc4a6c176b836fb55158d1447eb35374c9e11c8e76e5 not found: ID does not exist" containerID="1c18e2a2457b5ef1f5b8cc4a6c176b836fb55158d1447eb35374c9e11c8e76e5"
Oct 02 11:18:29 crc kubenswrapper[4783]: I1002 11:18:29.392457 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1c18e2a2457b5ef1f5b8cc4a6c176b836fb55158d1447eb35374c9e11c8e76e5"} err="failed to get container status \"1c18e2a2457b5ef1f5b8cc4a6c176b836fb55158d1447eb35374c9e11c8e76e5\": rpc error: code = NotFound desc = could not find container \"1c18e2a2457b5ef1f5b8cc4a6c176b836fb55158d1447eb35374c9e11c8e76e5\": container with ID starting with 1c18e2a2457b5ef1f5b8cc4a6c176b836fb55158d1447eb35374c9e11c8e76e5 not found: ID does not exist"
Oct 02 11:18:29 crc kubenswrapper[4783]: I1002 11:18:29.392482 4783 scope.go:117] "RemoveContainer" containerID="e53c9d6f77589e4b8d257b15649cbb3369c1cc7039ae1ebe4d1955564cd131ab"
Oct 02 11:18:29 crc kubenswrapper[4783]: E1002 11:18:29.392800 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e53c9d6f77589e4b8d257b15649cbb3369c1cc7039ae1ebe4d1955564cd131ab\": container with ID starting with e53c9d6f77589e4b8d257b15649cbb3369c1cc7039ae1ebe4d1955564cd131ab not found: ID does not exist" containerID="e53c9d6f77589e4b8d257b15649cbb3369c1cc7039ae1ebe4d1955564cd131ab"
Oct 02 11:18:29 crc kubenswrapper[4783]: I1002 11:18:29.392824 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e53c9d6f77589e4b8d257b15649cbb3369c1cc7039ae1ebe4d1955564cd131ab"} err="failed to get container status \"e53c9d6f77589e4b8d257b15649cbb3369c1cc7039ae1ebe4d1955564cd131ab\": rpc error: code = NotFound desc = could not find container \"e53c9d6f77589e4b8d257b15649cbb3369c1cc7039ae1ebe4d1955564cd131ab\": container with ID starting with e53c9d6f77589e4b8d257b15649cbb3369c1cc7039ae1ebe4d1955564cd131ab not found: ID does not exist"
Oct 02 11:18:29 crc kubenswrapper[4783]: I1002 11:18:29.392839 4783 scope.go:117] "RemoveContainer" containerID="ee6f32ecb814597f3a52a21e13f0bc322491efd976e6c2138e04940db5272458"
Oct 02 11:18:29 crc kubenswrapper[4783]: E1002 11:18:29.393108 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ee6f32ecb814597f3a52a21e13f0bc322491efd976e6c2138e04940db5272458\": container with ID starting with ee6f32ecb814597f3a52a21e13f0bc322491efd976e6c2138e04940db5272458 not found: ID does not exist" containerID="ee6f32ecb814597f3a52a21e13f0bc322491efd976e6c2138e04940db5272458"
Oct 02 11:18:29 crc kubenswrapper[4783]: I1002 11:18:29.393135 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ee6f32ecb814597f3a52a21e13f0bc322491efd976e6c2138e04940db5272458"} err="failed to get container status \"ee6f32ecb814597f3a52a21e13f0bc322491efd976e6c2138e04940db5272458\": rpc error: code = NotFound desc = could not find container \"ee6f32ecb814597f3a52a21e13f0bc322491efd976e6c2138e04940db5272458\": container with ID starting with ee6f32ecb814597f3a52a21e13f0bc322491efd976e6c2138e04940db5272458 not found: ID does not exist"
Oct 02 11:18:29 crc kubenswrapper[4783]: I1002 11:18:29.558518 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="106281fe-b164-4c5a-9597-34bc34658415" path="/var/lib/kubelet/pods/106281fe-b164-4c5a-9597-34bc34658415/volumes"
Oct 02 11:18:30 crc kubenswrapper[4783]: I1002 11:18:30.526195 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0"
Oct 02 11:18:30 crc kubenswrapper[4783]: I1002 11:18:30.633912 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0"
Oct 02 11:18:30 crc kubenswrapper[4783]: I1002 11:18:30.902454 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Oct 02 11:18:30 crc kubenswrapper[4783]: I1002 11:18:30.902550 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0"
Oct 02 11:18:31 crc kubenswrapper[4783]: I1002 11:18:31.918717 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="1cb28c52-08fa-4a54-86d0-445e9c7706a9" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.197:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Oct 02 11:18:31 crc kubenswrapper[4783]: I1002 11:18:31.918728 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="1cb28c52-08fa-4a54-86d0-445e9c7706a9" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.197:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"
Oct 02 11:18:32 crc kubenswrapper[4783]: I1002 11:18:32.353336 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-xhh9j"
Oct 02 11:18:32 crc kubenswrapper[4783]: I1002 11:18:32.353389 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-xhh9j"
Oct 02 11:18:32 crc kubenswrapper[4783]: I1002 11:18:32.406181 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-xhh9j"
Oct 02 11:18:32 crc kubenswrapper[4783]: I1002 11:18:32.981404 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-5fcdf587dd-wvthh" podUID="8abc2e1e-de94-4880-8a75-0c7ee0a2cdba" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.143:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.143:8443: connect: connection refused"
Oct 02 11:18:33 crc kubenswrapper[4783]: I1002 11:18:33.391775 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-xhh9j"
Oct 02 11:18:33 crc kubenswrapper[4783]: I1002 11:18:33.444083 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-xhh9j"]
Oct 02 11:18:33 crc kubenswrapper[4783]: I1002 11:18:33.894819 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Oct 02 11:18:33 crc kubenswrapper[4783]: I1002 11:18:33.894964 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0"
Oct 02 11:18:34 crc kubenswrapper[4783]: I1002 11:18:34.977670 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="d656e193-b275-4f25-a1f4-6be84d785a5c" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.199:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Oct 02 11:18:34 crc kubenswrapper[4783]: I1002 11:18:34.977711 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="d656e193-b275-4f25-a1f4-6be84d785a5c" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.199:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Oct 02 11:18:35 crc kubenswrapper[4783]: I1002 11:18:35.362571 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-xhh9j" podUID="f8cc2cdc-b576-402e-871a-4e8f6a604c62" containerName="registry-server" containerID="cri-o://0cd94e2fb61e7c56d9dd4a5e59d2987c63822cbe19d9520fb68d22dcf1b1e268" gracePeriod=2
Oct 02 11:18:35 crc kubenswrapper[4783]: I1002 11:18:35.634176 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0"
Oct 02 11:18:35 crc kubenswrapper[4783]: I1002 11:18:35.666171 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0"
Oct 02 11:18:35 crc kubenswrapper[4783]: I1002 11:18:35.859216 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xhh9j"
Oct 02 11:18:36 crc kubenswrapper[4783]: I1002 11:18:36.008303 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f8cc2cdc-b576-402e-871a-4e8f6a604c62-utilities\") pod \"f8cc2cdc-b576-402e-871a-4e8f6a604c62\" (UID: \"f8cc2cdc-b576-402e-871a-4e8f6a604c62\") "
Oct 02 11:18:36 crc kubenswrapper[4783]: I1002 11:18:36.008398 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f8cc2cdc-b576-402e-871a-4e8f6a604c62-catalog-content\") pod \"f8cc2cdc-b576-402e-871a-4e8f6a604c62\" (UID: \"f8cc2cdc-b576-402e-871a-4e8f6a604c62\") "
Oct 02 11:18:36 crc kubenswrapper[4783]: I1002 11:18:36.008542 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m9wtl\" (UniqueName: \"kubernetes.io/projected/f8cc2cdc-b576-402e-871a-4e8f6a604c62-kube-api-access-m9wtl\") pod \"f8cc2cdc-b576-402e-871a-4e8f6a604c62\" (UID: \"f8cc2cdc-b576-402e-871a-4e8f6a604c62\") "
Oct 02 11:18:36 crc kubenswrapper[4783]: I1002 11:18:36.009131 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f8cc2cdc-b576-402e-871a-4e8f6a604c62-utilities" (OuterVolumeSpecName: "utilities") pod "f8cc2cdc-b576-402e-871a-4e8f6a604c62" (UID: "f8cc2cdc-b576-402e-871a-4e8f6a604c62"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 02 11:18:36 crc kubenswrapper[4783]: I1002 11:18:36.015092 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f8cc2cdc-b576-402e-871a-4e8f6a604c62-kube-api-access-m9wtl" (OuterVolumeSpecName: "kube-api-access-m9wtl") pod "f8cc2cdc-b576-402e-871a-4e8f6a604c62" (UID: "f8cc2cdc-b576-402e-871a-4e8f6a604c62"). InnerVolumeSpecName "kube-api-access-m9wtl". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 02 11:18:36 crc kubenswrapper[4783]: I1002 11:18:36.047645 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f8cc2cdc-b576-402e-871a-4e8f6a604c62-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f8cc2cdc-b576-402e-871a-4e8f6a604c62" (UID: "f8cc2cdc-b576-402e-871a-4e8f6a604c62"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 02 11:18:36 crc kubenswrapper[4783]: I1002 11:18:36.110664 4783 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f8cc2cdc-b576-402e-871a-4e8f6a604c62-utilities\") on node \"crc\" DevicePath \"\""
Oct 02 11:18:36 crc kubenswrapper[4783]: I1002 11:18:36.111386 4783 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f8cc2cdc-b576-402e-871a-4e8f6a604c62-catalog-content\") on node \"crc\" DevicePath \"\""
Oct 02 11:18:36 crc kubenswrapper[4783]: I1002 11:18:36.111430 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m9wtl\" (UniqueName: \"kubernetes.io/projected/f8cc2cdc-b576-402e-871a-4e8f6a604c62-kube-api-access-m9wtl\") on node \"crc\" DevicePath \"\""
Oct 02 11:18:36 crc kubenswrapper[4783]: I1002 11:18:36.373950 4783 generic.go:334] "Generic (PLEG): container finished" podID="f8cc2cdc-b576-402e-871a-4e8f6a604c62" containerID="0cd94e2fb61e7c56d9dd4a5e59d2987c63822cbe19d9520fb68d22dcf1b1e268" exitCode=0
Oct 02 11:18:36 crc kubenswrapper[4783]: I1002 11:18:36.373995 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-xhh9j"
Oct 02 11:18:36 crc kubenswrapper[4783]: I1002 11:18:36.374044 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xhh9j" event={"ID":"f8cc2cdc-b576-402e-871a-4e8f6a604c62","Type":"ContainerDied","Data":"0cd94e2fb61e7c56d9dd4a5e59d2987c63822cbe19d9520fb68d22dcf1b1e268"}
Oct 02 11:18:36 crc kubenswrapper[4783]: I1002 11:18:36.374101 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-xhh9j" event={"ID":"f8cc2cdc-b576-402e-871a-4e8f6a604c62","Type":"ContainerDied","Data":"c8a655c5a94837cc68d33f3831bdc6149023f75c17b24788489a78b48a868ec8"}
Oct 02 11:18:36 crc kubenswrapper[4783]: I1002 11:18:36.374127 4783 scope.go:117] "RemoveContainer" containerID="0cd94e2fb61e7c56d9dd4a5e59d2987c63822cbe19d9520fb68d22dcf1b1e268"
Oct 02 11:18:36 crc kubenswrapper[4783]: I1002 11:18:36.419891 4783 scope.go:117] "RemoveContainer" containerID="76d94d56f935e177ac878c73e28e423d89111cfd8de616aae2f587cf5ec8bc4f"
Oct 02 11:18:36 crc kubenswrapper[4783]: I1002 11:18:36.425964 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-xhh9j"]
Oct 02 11:18:36 crc kubenswrapper[4783]: I1002 11:18:36.439614 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-xhh9j"]
Oct 02 11:18:36 crc kubenswrapper[4783]: I1002 11:18:36.450431 4783 scope.go:117] "RemoveContainer" containerID="8d6baeb6e4d6b11f4239bd0b89b80588530a7473d78fd82cb3ae715b2e52b3a1"
Oct 02 11:18:36 crc kubenswrapper[4783]: I1002 11:18:36.453514 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0"
Oct 02 11:18:36 crc kubenswrapper[4783]: I1002 11:18:36.500781 4783 scope.go:117] "RemoveContainer" containerID="0cd94e2fb61e7c56d9dd4a5e59d2987c63822cbe19d9520fb68d22dcf1b1e268"
Oct 02 11:18:36 crc kubenswrapper[4783]: E1002 11:18:36.502527 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0cd94e2fb61e7c56d9dd4a5e59d2987c63822cbe19d9520fb68d22dcf1b1e268\": container with ID starting with 0cd94e2fb61e7c56d9dd4a5e59d2987c63822cbe19d9520fb68d22dcf1b1e268 not
found: ID does not exist" containerID="0cd94e2fb61e7c56d9dd4a5e59d2987c63822cbe19d9520fb68d22dcf1b1e268" Oct 02 11:18:36 crc kubenswrapper[4783]: I1002 11:18:36.502591 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0cd94e2fb61e7c56d9dd4a5e59d2987c63822cbe19d9520fb68d22dcf1b1e268"} err="failed to get container status \"0cd94e2fb61e7c56d9dd4a5e59d2987c63822cbe19d9520fb68d22dcf1b1e268\": rpc error: code = NotFound desc = could not find container \"0cd94e2fb61e7c56d9dd4a5e59d2987c63822cbe19d9520fb68d22dcf1b1e268\": container with ID starting with 0cd94e2fb61e7c56d9dd4a5e59d2987c63822cbe19d9520fb68d22dcf1b1e268 not found: ID does not exist" Oct 02 11:18:36 crc kubenswrapper[4783]: I1002 11:18:36.502614 4783 scope.go:117] "RemoveContainer" containerID="76d94d56f935e177ac878c73e28e423d89111cfd8de616aae2f587cf5ec8bc4f" Oct 02 11:18:36 crc kubenswrapper[4783]: E1002 11:18:36.503086 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"76d94d56f935e177ac878c73e28e423d89111cfd8de616aae2f587cf5ec8bc4f\": container with ID starting with 76d94d56f935e177ac878c73e28e423d89111cfd8de616aae2f587cf5ec8bc4f not found: ID does not exist" containerID="76d94d56f935e177ac878c73e28e423d89111cfd8de616aae2f587cf5ec8bc4f" Oct 02 11:18:36 crc kubenswrapper[4783]: I1002 11:18:36.503117 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"76d94d56f935e177ac878c73e28e423d89111cfd8de616aae2f587cf5ec8bc4f"} err="failed to get container status \"76d94d56f935e177ac878c73e28e423d89111cfd8de616aae2f587cf5ec8bc4f\": rpc error: code = NotFound desc = could not find container \"76d94d56f935e177ac878c73e28e423d89111cfd8de616aae2f587cf5ec8bc4f\": container with ID starting with 76d94d56f935e177ac878c73e28e423d89111cfd8de616aae2f587cf5ec8bc4f not found: ID does not exist" Oct 02 11:18:36 crc kubenswrapper[4783]: I1002 11:18:36.503136 4783 scope.go:117] "RemoveContainer" containerID="8d6baeb6e4d6b11f4239bd0b89b80588530a7473d78fd82cb3ae715b2e52b3a1" Oct 02 11:18:36 crc kubenswrapper[4783]: E1002 11:18:36.503539 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8d6baeb6e4d6b11f4239bd0b89b80588530a7473d78fd82cb3ae715b2e52b3a1\": container with ID starting with 8d6baeb6e4d6b11f4239bd0b89b80588530a7473d78fd82cb3ae715b2e52b3a1 not found: ID does not exist" containerID="8d6baeb6e4d6b11f4239bd0b89b80588530a7473d78fd82cb3ae715b2e52b3a1" Oct 02 11:18:36 crc kubenswrapper[4783]: I1002 11:18:36.503568 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8d6baeb6e4d6b11f4239bd0b89b80588530a7473d78fd82cb3ae715b2e52b3a1"} err="failed to get container status \"8d6baeb6e4d6b11f4239bd0b89b80588530a7473d78fd82cb3ae715b2e52b3a1\": rpc error: code = NotFound desc = could not find container \"8d6baeb6e4d6b11f4239bd0b89b80588530a7473d78fd82cb3ae715b2e52b3a1\": container with ID starting with 8d6baeb6e4d6b11f4239bd0b89b80588530a7473d78fd82cb3ae715b2e52b3a1 not found: ID does not exist" Oct 02 11:18:37 crc kubenswrapper[4783]: I1002 11:18:37.566372 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f8cc2cdc-b576-402e-871a-4e8f6a604c62" path="/var/lib/kubelet/pods/f8cc2cdc-b576-402e-871a-4e8f6a604c62/volumes" Oct 02 11:18:38 crc kubenswrapper[4783]: I1002 11:18:38.321737 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Oct 02 11:18:38 crc kubenswrapper[4783]: I1002 11:18:38.393770 4783 generic.go:334] "Generic (PLEG): container finished" podID="aecd88b6-c737-4b6a-abfe-e9e0b9f5f14b" containerID="1c7697d95253002f29d4794a4e15294bbf71fcc3ace3e603cc38a9162c00d75f" exitCode=137 Oct 02 11:18:38 crc kubenswrapper[4783]: I1002 11:18:38.393825 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Oct 02 11:18:38 crc kubenswrapper[4783]: I1002 11:18:38.393848 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"aecd88b6-c737-4b6a-abfe-e9e0b9f5f14b","Type":"ContainerDied","Data":"1c7697d95253002f29d4794a4e15294bbf71fcc3ace3e603cc38a9162c00d75f"} Oct 02 11:18:38 crc kubenswrapper[4783]: I1002 11:18:38.393888 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"aecd88b6-c737-4b6a-abfe-e9e0b9f5f14b","Type":"ContainerDied","Data":"72f9bb032bd2eaa1af5ce1f085772cb13fa786036422b79cfd2aae762e84a9c7"} Oct 02 11:18:38 crc kubenswrapper[4783]: I1002 11:18:38.393908 4783 scope.go:117] "RemoveContainer" containerID="1c7697d95253002f29d4794a4e15294bbf71fcc3ace3e603cc38a9162c00d75f" Oct 02 11:18:38 crc kubenswrapper[4783]: I1002 11:18:38.418017 4783 scope.go:117] "RemoveContainer" containerID="1c7697d95253002f29d4794a4e15294bbf71fcc3ace3e603cc38a9162c00d75f" Oct 02 11:18:38 crc kubenswrapper[4783]: E1002 11:18:38.418479 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1c7697d95253002f29d4794a4e15294bbf71fcc3ace3e603cc38a9162c00d75f\": container with ID starting with 1c7697d95253002f29d4794a4e15294bbf71fcc3ace3e603cc38a9162c00d75f not found: ID does not exist" containerID="1c7697d95253002f29d4794a4e15294bbf71fcc3ace3e603cc38a9162c00d75f" Oct 02 11:18:38 crc kubenswrapper[4783]: I1002 11:18:38.418571 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1c7697d95253002f29d4794a4e15294bbf71fcc3ace3e603cc38a9162c00d75f"} err="failed to get container status \"1c7697d95253002f29d4794a4e15294bbf71fcc3ace3e603cc38a9162c00d75f\": rpc error: code = NotFound desc = could not find container \"1c7697d95253002f29d4794a4e15294bbf71fcc3ace3e603cc38a9162c00d75f\": container with ID starting with 1c7697d95253002f29d4794a4e15294bbf71fcc3ace3e603cc38a9162c00d75f not found: ID does not exist" Oct 02 11:18:38 crc kubenswrapper[4783]: I1002 11:18:38.458242 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aecd88b6-c737-4b6a-abfe-e9e0b9f5f14b-config-data\") pod \"aecd88b6-c737-4b6a-abfe-e9e0b9f5f14b\" (UID: \"aecd88b6-c737-4b6a-abfe-e9e0b9f5f14b\") " Oct 02 11:18:38 crc kubenswrapper[4783]: I1002 11:18:38.458307 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aecd88b6-c737-4b6a-abfe-e9e0b9f5f14b-combined-ca-bundle\") pod \"aecd88b6-c737-4b6a-abfe-e9e0b9f5f14b\" (UID: \"aecd88b6-c737-4b6a-abfe-e9e0b9f5f14b\") " Oct 02 11:18:38 crc kubenswrapper[4783]: I1002 11:18:38.458644 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wgzn8\" (UniqueName: \"kubernetes.io/projected/aecd88b6-c737-4b6a-abfe-e9e0b9f5f14b-kube-api-access-wgzn8\") pod 
\"aecd88b6-c737-4b6a-abfe-e9e0b9f5f14b\" (UID: \"aecd88b6-c737-4b6a-abfe-e9e0b9f5f14b\") " Oct 02 11:18:38 crc kubenswrapper[4783]: I1002 11:18:38.468665 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aecd88b6-c737-4b6a-abfe-e9e0b9f5f14b-kube-api-access-wgzn8" (OuterVolumeSpecName: "kube-api-access-wgzn8") pod "aecd88b6-c737-4b6a-abfe-e9e0b9f5f14b" (UID: "aecd88b6-c737-4b6a-abfe-e9e0b9f5f14b"). InnerVolumeSpecName "kube-api-access-wgzn8". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:18:38 crc kubenswrapper[4783]: I1002 11:18:38.487280 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aecd88b6-c737-4b6a-abfe-e9e0b9f5f14b-config-data" (OuterVolumeSpecName: "config-data") pod "aecd88b6-c737-4b6a-abfe-e9e0b9f5f14b" (UID: "aecd88b6-c737-4b6a-abfe-e9e0b9f5f14b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:18:38 crc kubenswrapper[4783]: I1002 11:18:38.495022 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/aecd88b6-c737-4b6a-abfe-e9e0b9f5f14b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "aecd88b6-c737-4b6a-abfe-e9e0b9f5f14b" (UID: "aecd88b6-c737-4b6a-abfe-e9e0b9f5f14b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:18:38 crc kubenswrapper[4783]: I1002 11:18:38.561611 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wgzn8\" (UniqueName: \"kubernetes.io/projected/aecd88b6-c737-4b6a-abfe-e9e0b9f5f14b-kube-api-access-wgzn8\") on node \"crc\" DevicePath \"\"" Oct 02 11:18:38 crc kubenswrapper[4783]: I1002 11:18:38.561653 4783 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/aecd88b6-c737-4b6a-abfe-e9e0b9f5f14b-config-data\") on node \"crc\" DevicePath \"\"" Oct 02 11:18:38 crc kubenswrapper[4783]: I1002 11:18:38.561667 4783 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/aecd88b6-c737-4b6a-abfe-e9e0b9f5f14b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 11:18:38 crc kubenswrapper[4783]: I1002 11:18:38.734453 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Oct 02 11:18:38 crc kubenswrapper[4783]: I1002 11:18:38.742945 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Oct 02 11:18:38 crc kubenswrapper[4783]: I1002 11:18:38.757256 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Oct 02 11:18:38 crc kubenswrapper[4783]: E1002 11:18:38.758386 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8cc2cdc-b576-402e-871a-4e8f6a604c62" containerName="extract-utilities" Oct 02 11:18:38 crc kubenswrapper[4783]: I1002 11:18:38.758426 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8cc2cdc-b576-402e-871a-4e8f6a604c62" containerName="extract-utilities" Oct 02 11:18:38 crc kubenswrapper[4783]: E1002 11:18:38.758466 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8cc2cdc-b576-402e-871a-4e8f6a604c62" containerName="registry-server" Oct 02 11:18:38 crc kubenswrapper[4783]: I1002 11:18:38.758475 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8cc2cdc-b576-402e-871a-4e8f6a604c62" containerName="registry-server" Oct 02 11:18:38 crc 
kubenswrapper[4783]: E1002 11:18:38.758487 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="106281fe-b164-4c5a-9597-34bc34658415" containerName="extract-utilities" Oct 02 11:18:38 crc kubenswrapper[4783]: I1002 11:18:38.758496 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="106281fe-b164-4c5a-9597-34bc34658415" containerName="extract-utilities" Oct 02 11:18:38 crc kubenswrapper[4783]: E1002 11:18:38.758511 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="106281fe-b164-4c5a-9597-34bc34658415" containerName="registry-server" Oct 02 11:18:38 crc kubenswrapper[4783]: I1002 11:18:38.758520 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="106281fe-b164-4c5a-9597-34bc34658415" containerName="registry-server" Oct 02 11:18:38 crc kubenswrapper[4783]: E1002 11:18:38.758543 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="106281fe-b164-4c5a-9597-34bc34658415" containerName="extract-content" Oct 02 11:18:38 crc kubenswrapper[4783]: I1002 11:18:38.758552 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="106281fe-b164-4c5a-9597-34bc34658415" containerName="extract-content" Oct 02 11:18:38 crc kubenswrapper[4783]: E1002 11:18:38.758569 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f8cc2cdc-b576-402e-871a-4e8f6a604c62" containerName="extract-content" Oct 02 11:18:38 crc kubenswrapper[4783]: I1002 11:18:38.758577 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="f8cc2cdc-b576-402e-871a-4e8f6a604c62" containerName="extract-content" Oct 02 11:18:38 crc kubenswrapper[4783]: E1002 11:18:38.758590 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aecd88b6-c737-4b6a-abfe-e9e0b9f5f14b" containerName="nova-cell1-novncproxy-novncproxy" Oct 02 11:18:38 crc kubenswrapper[4783]: I1002 11:18:38.758599 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="aecd88b6-c737-4b6a-abfe-e9e0b9f5f14b" containerName="nova-cell1-novncproxy-novncproxy" Oct 02 11:18:38 crc kubenswrapper[4783]: I1002 11:18:38.758828 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="aecd88b6-c737-4b6a-abfe-e9e0b9f5f14b" containerName="nova-cell1-novncproxy-novncproxy" Oct 02 11:18:38 crc kubenswrapper[4783]: I1002 11:18:38.758864 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="f8cc2cdc-b576-402e-871a-4e8f6a604c62" containerName="registry-server" Oct 02 11:18:38 crc kubenswrapper[4783]: I1002 11:18:38.758876 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="106281fe-b164-4c5a-9597-34bc34658415" containerName="registry-server" Oct 02 11:18:38 crc kubenswrapper[4783]: I1002 11:18:38.760001 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Oct 02 11:18:38 crc kubenswrapper[4783]: I1002 11:18:38.765648 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Oct 02 11:18:38 crc kubenswrapper[4783]: I1002 11:18:38.765948 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Oct 02 11:18:38 crc kubenswrapper[4783]: I1002 11:18:38.765990 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Oct 02 11:18:38 crc kubenswrapper[4783]: I1002 11:18:38.769780 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Oct 02 11:18:38 crc kubenswrapper[4783]: I1002 11:18:38.866912 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xhpth\" (UniqueName: \"kubernetes.io/projected/55ed1b2f-a444-43e4-9311-411d81bc052d-kube-api-access-xhpth\") pod \"nova-cell1-novncproxy-0\" (UID: \"55ed1b2f-a444-43e4-9311-411d81bc052d\") " pod="openstack/nova-cell1-novncproxy-0" Oct 02 11:18:38 crc kubenswrapper[4783]: I1002 11:18:38.867018 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55ed1b2f-a444-43e4-9311-411d81bc052d-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"55ed1b2f-a444-43e4-9311-411d81bc052d\") " pod="openstack/nova-cell1-novncproxy-0" Oct 02 11:18:38 crc kubenswrapper[4783]: I1002 11:18:38.867073 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/55ed1b2f-a444-43e4-9311-411d81bc052d-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"55ed1b2f-a444-43e4-9311-411d81bc052d\") " pod="openstack/nova-cell1-novncproxy-0" Oct 02 11:18:38 crc kubenswrapper[4783]: I1002 11:18:38.867475 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/55ed1b2f-a444-43e4-9311-411d81bc052d-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"55ed1b2f-a444-43e4-9311-411d81bc052d\") " pod="openstack/nova-cell1-novncproxy-0" Oct 02 11:18:38 crc kubenswrapper[4783]: I1002 11:18:38.867527 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55ed1b2f-a444-43e4-9311-411d81bc052d-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"55ed1b2f-a444-43e4-9311-411d81bc052d\") " pod="openstack/nova-cell1-novncproxy-0" Oct 02 11:18:38 crc kubenswrapper[4783]: I1002 11:18:38.970046 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55ed1b2f-a444-43e4-9311-411d81bc052d-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"55ed1b2f-a444-43e4-9311-411d81bc052d\") " pod="openstack/nova-cell1-novncproxy-0" Oct 02 11:18:38 crc kubenswrapper[4783]: I1002 11:18:38.970150 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/55ed1b2f-a444-43e4-9311-411d81bc052d-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"55ed1b2f-a444-43e4-9311-411d81bc052d\") " 
pod="openstack/nova-cell1-novncproxy-0" Oct 02 11:18:38 crc kubenswrapper[4783]: I1002 11:18:38.970254 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xhpth\" (UniqueName: \"kubernetes.io/projected/55ed1b2f-a444-43e4-9311-411d81bc052d-kube-api-access-xhpth\") pod \"nova-cell1-novncproxy-0\" (UID: \"55ed1b2f-a444-43e4-9311-411d81bc052d\") " pod="openstack/nova-cell1-novncproxy-0" Oct 02 11:18:38 crc kubenswrapper[4783]: I1002 11:18:38.970450 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55ed1b2f-a444-43e4-9311-411d81bc052d-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"55ed1b2f-a444-43e4-9311-411d81bc052d\") " pod="openstack/nova-cell1-novncproxy-0" Oct 02 11:18:38 crc kubenswrapper[4783]: I1002 11:18:38.970499 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/55ed1b2f-a444-43e4-9311-411d81bc052d-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"55ed1b2f-a444-43e4-9311-411d81bc052d\") " pod="openstack/nova-cell1-novncproxy-0" Oct 02 11:18:38 crc kubenswrapper[4783]: I1002 11:18:38.974088 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/55ed1b2f-a444-43e4-9311-411d81bc052d-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"55ed1b2f-a444-43e4-9311-411d81bc052d\") " pod="openstack/nova-cell1-novncproxy-0" Oct 02 11:18:38 crc kubenswrapper[4783]: I1002 11:18:38.974181 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/55ed1b2f-a444-43e4-9311-411d81bc052d-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"55ed1b2f-a444-43e4-9311-411d81bc052d\") " pod="openstack/nova-cell1-novncproxy-0" Oct 02 11:18:38 crc kubenswrapper[4783]: I1002 11:18:38.974207 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/55ed1b2f-a444-43e4-9311-411d81bc052d-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"55ed1b2f-a444-43e4-9311-411d81bc052d\") " pod="openstack/nova-cell1-novncproxy-0" Oct 02 11:18:38 crc kubenswrapper[4783]: I1002 11:18:38.975976 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/55ed1b2f-a444-43e4-9311-411d81bc052d-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"55ed1b2f-a444-43e4-9311-411d81bc052d\") " pod="openstack/nova-cell1-novncproxy-0" Oct 02 11:18:38 crc kubenswrapper[4783]: I1002 11:18:38.996275 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xhpth\" (UniqueName: \"kubernetes.io/projected/55ed1b2f-a444-43e4-9311-411d81bc052d-kube-api-access-xhpth\") pod \"nova-cell1-novncproxy-0\" (UID: \"55ed1b2f-a444-43e4-9311-411d81bc052d\") " pod="openstack/nova-cell1-novncproxy-0" Oct 02 11:18:39 crc kubenswrapper[4783]: I1002 11:18:39.078931 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Oct 02 11:18:39 crc kubenswrapper[4783]: I1002 11:18:39.526296 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Oct 02 11:18:39 crc kubenswrapper[4783]: W1002 11:18:39.530135 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod55ed1b2f_a444_43e4_9311_411d81bc052d.slice/crio-0c6d040e3634b597e3ad44fbef29e2a6d5f9b90b7140fa316894454a1507d496 WatchSource:0}: Error finding container 0c6d040e3634b597e3ad44fbef29e2a6d5f9b90b7140fa316894454a1507d496: Status 404 returned error can't find the container with id 0c6d040e3634b597e3ad44fbef29e2a6d5f9b90b7140fa316894454a1507d496 Oct 02 11:18:39 crc kubenswrapper[4783]: I1002 11:18:39.557603 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aecd88b6-c737-4b6a-abfe-e9e0b9f5f14b" path="/var/lib/kubelet/pods/aecd88b6-c737-4b6a-abfe-e9e0b9f5f14b/volumes" Oct 02 11:18:40 crc kubenswrapper[4783]: I1002 11:18:40.416902 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"55ed1b2f-a444-43e4-9311-411d81bc052d","Type":"ContainerStarted","Data":"39fbfba501b553b761648d352934426a90d238ec794d8df4a9de49c703df2ab0"} Oct 02 11:18:40 crc kubenswrapper[4783]: I1002 11:18:40.416965 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"55ed1b2f-a444-43e4-9311-411d81bc052d","Type":"ContainerStarted","Data":"0c6d040e3634b597e3ad44fbef29e2a6d5f9b90b7140fa316894454a1507d496"} Oct 02 11:18:40 crc kubenswrapper[4783]: I1002 11:18:40.450310 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.450290448 podStartE2EDuration="2.450290448s" podCreationTimestamp="2025-10-02 11:18:38 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:18:40.43687152 +0000 UTC m=+1553.753065781" watchObservedRunningTime="2025-10-02 11:18:40.450290448 +0000 UTC m=+1553.766484709" Oct 02 11:18:40 crc kubenswrapper[4783]: I1002 11:18:40.908379 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Oct 02 11:18:40 crc kubenswrapper[4783]: I1002 11:18:40.909492 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Oct 02 11:18:40 crc kubenswrapper[4783]: I1002 11:18:40.925524 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Oct 02 11:18:41 crc kubenswrapper[4783]: I1002 11:18:41.430472 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Oct 02 11:18:42 crc kubenswrapper[4783]: I1002 11:18:42.981236 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/horizon-5fcdf587dd-wvthh" podUID="8abc2e1e-de94-4880-8a75-0c7ee0a2cdba" containerName="horizon" probeResult="failure" output="Get \"https://10.217.0.143:8443/dashboard/auth/login/?next=/dashboard/\": dial tcp 10.217.0.143:8443: connect: connection refused" Oct 02 11:18:42 crc kubenswrapper[4783]: I1002 11:18:42.981680 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/horizon-5fcdf587dd-wvthh" Oct 02 11:18:43 crc kubenswrapper[4783]: I1002 11:18:43.897269 4783 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="started" pod="openstack/nova-api-0" Oct 02 11:18:43 crc kubenswrapper[4783]: I1002 11:18:43.897621 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Oct 02 11:18:43 crc kubenswrapper[4783]: I1002 11:18:43.898080 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Oct 02 11:18:43 crc kubenswrapper[4783]: I1002 11:18:43.898176 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Oct 02 11:18:43 crc kubenswrapper[4783]: I1002 11:18:43.901665 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Oct 02 11:18:43 crc kubenswrapper[4783]: I1002 11:18:43.902301 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Oct 02 11:18:44 crc kubenswrapper[4783]: I1002 11:18:44.080761 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Oct 02 11:18:44 crc kubenswrapper[4783]: I1002 11:18:44.107473 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-59cf4bdb65-z5qhk"] Oct 02 11:18:44 crc kubenswrapper[4783]: I1002 11:18:44.111834 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-59cf4bdb65-z5qhk" Oct 02 11:18:44 crc kubenswrapper[4783]: I1002 11:18:44.136038 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-59cf4bdb65-z5qhk"] Oct 02 11:18:44 crc kubenswrapper[4783]: I1002 11:18:44.276713 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lf57k\" (UniqueName: \"kubernetes.io/projected/f9e4493b-d398-4e4a-9467-7c08f76c26cf-kube-api-access-lf57k\") pod \"dnsmasq-dns-59cf4bdb65-z5qhk\" (UID: \"f9e4493b-d398-4e4a-9467-7c08f76c26cf\") " pod="openstack/dnsmasq-dns-59cf4bdb65-z5qhk" Oct 02 11:18:44 crc kubenswrapper[4783]: I1002 11:18:44.276765 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f9e4493b-d398-4e4a-9467-7c08f76c26cf-config\") pod \"dnsmasq-dns-59cf4bdb65-z5qhk\" (UID: \"f9e4493b-d398-4e4a-9467-7c08f76c26cf\") " pod="openstack/dnsmasq-dns-59cf4bdb65-z5qhk" Oct 02 11:18:44 crc kubenswrapper[4783]: I1002 11:18:44.276813 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f9e4493b-d398-4e4a-9467-7c08f76c26cf-ovsdbserver-nb\") pod \"dnsmasq-dns-59cf4bdb65-z5qhk\" (UID: \"f9e4493b-d398-4e4a-9467-7c08f76c26cf\") " pod="openstack/dnsmasq-dns-59cf4bdb65-z5qhk" Oct 02 11:18:44 crc kubenswrapper[4783]: I1002 11:18:44.276873 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f9e4493b-d398-4e4a-9467-7c08f76c26cf-ovsdbserver-sb\") pod \"dnsmasq-dns-59cf4bdb65-z5qhk\" (UID: \"f9e4493b-d398-4e4a-9467-7c08f76c26cf\") " pod="openstack/dnsmasq-dns-59cf4bdb65-z5qhk" Oct 02 11:18:44 crc kubenswrapper[4783]: I1002 11:18:44.276940 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f9e4493b-d398-4e4a-9467-7c08f76c26cf-dns-svc\") pod \"dnsmasq-dns-59cf4bdb65-z5qhk\" (UID: \"f9e4493b-d398-4e4a-9467-7c08f76c26cf\") " 
pod="openstack/dnsmasq-dns-59cf4bdb65-z5qhk" Oct 02 11:18:44 crc kubenswrapper[4783]: I1002 11:18:44.276966 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f9e4493b-d398-4e4a-9467-7c08f76c26cf-dns-swift-storage-0\") pod \"dnsmasq-dns-59cf4bdb65-z5qhk\" (UID: \"f9e4493b-d398-4e4a-9467-7c08f76c26cf\") " pod="openstack/dnsmasq-dns-59cf4bdb65-z5qhk" Oct 02 11:18:44 crc kubenswrapper[4783]: I1002 11:18:44.378238 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f9e4493b-d398-4e4a-9467-7c08f76c26cf-config\") pod \"dnsmasq-dns-59cf4bdb65-z5qhk\" (UID: \"f9e4493b-d398-4e4a-9467-7c08f76c26cf\") " pod="openstack/dnsmasq-dns-59cf4bdb65-z5qhk" Oct 02 11:18:44 crc kubenswrapper[4783]: I1002 11:18:44.379027 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f9e4493b-d398-4e4a-9467-7c08f76c26cf-ovsdbserver-nb\") pod \"dnsmasq-dns-59cf4bdb65-z5qhk\" (UID: \"f9e4493b-d398-4e4a-9467-7c08f76c26cf\") " pod="openstack/dnsmasq-dns-59cf4bdb65-z5qhk" Oct 02 11:18:44 crc kubenswrapper[4783]: I1002 11:18:44.379245 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f9e4493b-d398-4e4a-9467-7c08f76c26cf-ovsdbserver-sb\") pod \"dnsmasq-dns-59cf4bdb65-z5qhk\" (UID: \"f9e4493b-d398-4e4a-9467-7c08f76c26cf\") " pod="openstack/dnsmasq-dns-59cf4bdb65-z5qhk" Oct 02 11:18:44 crc kubenswrapper[4783]: I1002 11:18:44.379386 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f9e4493b-d398-4e4a-9467-7c08f76c26cf-dns-svc\") pod \"dnsmasq-dns-59cf4bdb65-z5qhk\" (UID: \"f9e4493b-d398-4e4a-9467-7c08f76c26cf\") " pod="openstack/dnsmasq-dns-59cf4bdb65-z5qhk" Oct 02 11:18:44 crc kubenswrapper[4783]: I1002 11:18:44.379511 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f9e4493b-d398-4e4a-9467-7c08f76c26cf-dns-swift-storage-0\") pod \"dnsmasq-dns-59cf4bdb65-z5qhk\" (UID: \"f9e4493b-d398-4e4a-9467-7c08f76c26cf\") " pod="openstack/dnsmasq-dns-59cf4bdb65-z5qhk" Oct 02 11:18:44 crc kubenswrapper[4783]: I1002 11:18:44.379712 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lf57k\" (UniqueName: \"kubernetes.io/projected/f9e4493b-d398-4e4a-9467-7c08f76c26cf-kube-api-access-lf57k\") pod \"dnsmasq-dns-59cf4bdb65-z5qhk\" (UID: \"f9e4493b-d398-4e4a-9467-7c08f76c26cf\") " pod="openstack/dnsmasq-dns-59cf4bdb65-z5qhk" Oct 02 11:18:44 crc kubenswrapper[4783]: I1002 11:18:44.379730 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f9e4493b-d398-4e4a-9467-7c08f76c26cf-config\") pod \"dnsmasq-dns-59cf4bdb65-z5qhk\" (UID: \"f9e4493b-d398-4e4a-9467-7c08f76c26cf\") " pod="openstack/dnsmasq-dns-59cf4bdb65-z5qhk" Oct 02 11:18:44 crc kubenswrapper[4783]: I1002 11:18:44.380034 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f9e4493b-d398-4e4a-9467-7c08f76c26cf-ovsdbserver-nb\") pod \"dnsmasq-dns-59cf4bdb65-z5qhk\" (UID: \"f9e4493b-d398-4e4a-9467-7c08f76c26cf\") " pod="openstack/dnsmasq-dns-59cf4bdb65-z5qhk" Oct 02 11:18:44 
crc kubenswrapper[4783]: I1002 11:18:44.380471 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f9e4493b-d398-4e4a-9467-7c08f76c26cf-dns-svc\") pod \"dnsmasq-dns-59cf4bdb65-z5qhk\" (UID: \"f9e4493b-d398-4e4a-9467-7c08f76c26cf\") " pod="openstack/dnsmasq-dns-59cf4bdb65-z5qhk" Oct 02 11:18:44 crc kubenswrapper[4783]: I1002 11:18:44.381105 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f9e4493b-d398-4e4a-9467-7c08f76c26cf-ovsdbserver-sb\") pod \"dnsmasq-dns-59cf4bdb65-z5qhk\" (UID: \"f9e4493b-d398-4e4a-9467-7c08f76c26cf\") " pod="openstack/dnsmasq-dns-59cf4bdb65-z5qhk" Oct 02 11:18:44 crc kubenswrapper[4783]: I1002 11:18:44.381708 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f9e4493b-d398-4e4a-9467-7c08f76c26cf-dns-swift-storage-0\") pod \"dnsmasq-dns-59cf4bdb65-z5qhk\" (UID: \"f9e4493b-d398-4e4a-9467-7c08f76c26cf\") " pod="openstack/dnsmasq-dns-59cf4bdb65-z5qhk" Oct 02 11:18:44 crc kubenswrapper[4783]: I1002 11:18:44.400398 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lf57k\" (UniqueName: \"kubernetes.io/projected/f9e4493b-d398-4e4a-9467-7c08f76c26cf-kube-api-access-lf57k\") pod \"dnsmasq-dns-59cf4bdb65-z5qhk\" (UID: \"f9e4493b-d398-4e4a-9467-7c08f76c26cf\") " pod="openstack/dnsmasq-dns-59cf4bdb65-z5qhk" Oct 02 11:18:44 crc kubenswrapper[4783]: I1002 11:18:44.439525 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-59cf4bdb65-z5qhk" Oct 02 11:18:44 crc kubenswrapper[4783]: I1002 11:18:44.987900 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-59cf4bdb65-z5qhk"] Oct 02 11:18:45 crc kubenswrapper[4783]: I1002 11:18:45.466728 4783 generic.go:334] "Generic (PLEG): container finished" podID="f9e4493b-d398-4e4a-9467-7c08f76c26cf" containerID="5fb306823d6bed24927f52ec15273f818e03d796730c9f6931f75ba1922e9bc7" exitCode=0 Oct 02 11:18:45 crc kubenswrapper[4783]: I1002 11:18:45.468643 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59cf4bdb65-z5qhk" event={"ID":"f9e4493b-d398-4e4a-9467-7c08f76c26cf","Type":"ContainerDied","Data":"5fb306823d6bed24927f52ec15273f818e03d796730c9f6931f75ba1922e9bc7"} Oct 02 11:18:45 crc kubenswrapper[4783]: I1002 11:18:45.468691 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59cf4bdb65-z5qhk" event={"ID":"f9e4493b-d398-4e4a-9467-7c08f76c26cf","Type":"ContainerStarted","Data":"540b42c5f0ace69aa9cafc471d6ebb5a3dd934f1293d516155644a51c20cbaa1"} Oct 02 11:18:46 crc kubenswrapper[4783]: I1002 11:18:46.394487 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 02 11:18:46 crc kubenswrapper[4783]: I1002 11:18:46.395189 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="cbdb97bd-c226-45ae-943c-04c8530c6df7" containerName="ceilometer-central-agent" containerID="cri-o://8bbebfad567d89c00f9f3676e1c85972121d186b70d32723c247a278546fa121" gracePeriod=30 Oct 02 11:18:46 crc kubenswrapper[4783]: I1002 11:18:46.395324 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="cbdb97bd-c226-45ae-943c-04c8530c6df7" containerName="proxy-httpd" 
containerID="cri-o://2ab006276d64b11347c490641d4ac97d0ea3884801ab5dd5c13fed327a33c761" gracePeriod=30 Oct 02 11:18:46 crc kubenswrapper[4783]: I1002 11:18:46.395247 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="cbdb97bd-c226-45ae-943c-04c8530c6df7" containerName="sg-core" containerID="cri-o://21a8e4d205753ec7a4e72db7455eb1291dc78ada382061fd5846bd39687a9d09" gracePeriod=30 Oct 02 11:18:46 crc kubenswrapper[4783]: I1002 11:18:46.395614 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="cbdb97bd-c226-45ae-943c-04c8530c6df7" containerName="ceilometer-notification-agent" containerID="cri-o://28fa8af072b1f7d22cf969668e74f0dfb9dff62c4d9029c6157e411f3442d715" gracePeriod=30 Oct 02 11:18:46 crc kubenswrapper[4783]: I1002 11:18:46.424213 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Oct 02 11:18:46 crc kubenswrapper[4783]: I1002 11:18:46.480954 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="d656e193-b275-4f25-a1f4-6be84d785a5c" containerName="nova-api-log" containerID="cri-o://5f3604143df308eaf6f606d2c580280f1e3aa130a25603104e90eace90b8e9df" gracePeriod=30 Oct 02 11:18:46 crc kubenswrapper[4783]: I1002 11:18:46.482275 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59cf4bdb65-z5qhk" event={"ID":"f9e4493b-d398-4e4a-9467-7c08f76c26cf","Type":"ContainerStarted","Data":"2bb9f2859174c9d5c6e2b07cb3bd6cd227d137a5a447cec6aa381c5773922f06"} Oct 02 11:18:46 crc kubenswrapper[4783]: I1002 11:18:46.482332 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-59cf4bdb65-z5qhk" Oct 02 11:18:46 crc kubenswrapper[4783]: I1002 11:18:46.482816 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="d656e193-b275-4f25-a1f4-6be84d785a5c" containerName="nova-api-api" containerID="cri-o://d836319d22643605a08a9f9c365f41d180e0d87c53345fe5f12fe78838fc454d" gracePeriod=30 Oct 02 11:18:46 crc kubenswrapper[4783]: I1002 11:18:46.526143 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-59cf4bdb65-z5qhk" podStartSLOduration=2.5261236240000002 podStartE2EDuration="2.526123624s" podCreationTimestamp="2025-10-02 11:18:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:18:46.508391678 +0000 UTC m=+1559.824585949" watchObservedRunningTime="2025-10-02 11:18:46.526123624 +0000 UTC m=+1559.842317885" Oct 02 11:18:47 crc kubenswrapper[4783]: I1002 11:18:47.494081 4783 generic.go:334] "Generic (PLEG): container finished" podID="d656e193-b275-4f25-a1f4-6be84d785a5c" containerID="5f3604143df308eaf6f606d2c580280f1e3aa130a25603104e90eace90b8e9df" exitCode=143 Oct 02 11:18:47 crc kubenswrapper[4783]: I1002 11:18:47.494171 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d656e193-b275-4f25-a1f4-6be84d785a5c","Type":"ContainerDied","Data":"5f3604143df308eaf6f606d2c580280f1e3aa130a25603104e90eace90b8e9df"} Oct 02 11:18:47 crc kubenswrapper[4783]: I1002 11:18:47.497615 4783 generic.go:334] "Generic (PLEG): container finished" podID="cbdb97bd-c226-45ae-943c-04c8530c6df7" containerID="2ab006276d64b11347c490641d4ac97d0ea3884801ab5dd5c13fed327a33c761" exitCode=0 Oct 02 11:18:47 crc kubenswrapper[4783]: 
I1002 11:18:47.497644 4783 generic.go:334] "Generic (PLEG): container finished" podID="cbdb97bd-c226-45ae-943c-04c8530c6df7" containerID="21a8e4d205753ec7a4e72db7455eb1291dc78ada382061fd5846bd39687a9d09" exitCode=2 Oct 02 11:18:47 crc kubenswrapper[4783]: I1002 11:18:47.497656 4783 generic.go:334] "Generic (PLEG): container finished" podID="cbdb97bd-c226-45ae-943c-04c8530c6df7" containerID="8bbebfad567d89c00f9f3676e1c85972121d186b70d32723c247a278546fa121" exitCode=0 Oct 02 11:18:47 crc kubenswrapper[4783]: I1002 11:18:47.497690 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cbdb97bd-c226-45ae-943c-04c8530c6df7","Type":"ContainerDied","Data":"2ab006276d64b11347c490641d4ac97d0ea3884801ab5dd5c13fed327a33c761"} Oct 02 11:18:47 crc kubenswrapper[4783]: I1002 11:18:47.497733 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cbdb97bd-c226-45ae-943c-04c8530c6df7","Type":"ContainerDied","Data":"21a8e4d205753ec7a4e72db7455eb1291dc78ada382061fd5846bd39687a9d09"} Oct 02 11:18:47 crc kubenswrapper[4783]: I1002 11:18:47.497748 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cbdb97bd-c226-45ae-943c-04c8530c6df7","Type":"ContainerDied","Data":"8bbebfad567d89c00f9f3676e1c85972121d186b70d32723c247a278546fa121"} Oct 02 11:18:48 crc kubenswrapper[4783]: I1002 11:18:48.530990 4783 generic.go:334] "Generic (PLEG): container finished" podID="8abc2e1e-de94-4880-8a75-0c7ee0a2cdba" containerID="617e73daab1fa7d04c236e9a25fd46b5542b5fb3258901a035760e38676e12d9" exitCode=137 Oct 02 11:18:48 crc kubenswrapper[4783]: I1002 11:18:48.531392 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5fcdf587dd-wvthh" event={"ID":"8abc2e1e-de94-4880-8a75-0c7ee0a2cdba","Type":"ContainerDied","Data":"617e73daab1fa7d04c236e9a25fd46b5542b5fb3258901a035760e38676e12d9"} Oct 02 11:18:48 crc kubenswrapper[4783]: I1002 11:18:48.531602 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/horizon-5fcdf587dd-wvthh" event={"ID":"8abc2e1e-de94-4880-8a75-0c7ee0a2cdba","Type":"ContainerDied","Data":"f71c1c3a0a71f25968d4e14e51f8b0e7503b8a2996217ec14dca02b29bce2a6b"} Oct 02 11:18:48 crc kubenswrapper[4783]: I1002 11:18:48.531620 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f71c1c3a0a71f25968d4e14e51f8b0e7503b8a2996217ec14dca02b29bce2a6b" Oct 02 11:18:48 crc kubenswrapper[4783]: I1002 11:18:48.545814 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-5fcdf587dd-wvthh" Oct 02 11:18:48 crc kubenswrapper[4783]: I1002 11:18:48.672765 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8abc2e1e-de94-4880-8a75-0c7ee0a2cdba-combined-ca-bundle\") pod \"8abc2e1e-de94-4880-8a75-0c7ee0a2cdba\" (UID: \"8abc2e1e-de94-4880-8a75-0c7ee0a2cdba\") " Oct 02 11:18:48 crc kubenswrapper[4783]: I1002 11:18:48.672890 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/8abc2e1e-de94-4880-8a75-0c7ee0a2cdba-horizon-tls-certs\") pod \"8abc2e1e-de94-4880-8a75-0c7ee0a2cdba\" (UID: \"8abc2e1e-de94-4880-8a75-0c7ee0a2cdba\") " Oct 02 11:18:48 crc kubenswrapper[4783]: I1002 11:18:48.672928 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8abc2e1e-de94-4880-8a75-0c7ee0a2cdba-config-data\") pod \"8abc2e1e-de94-4880-8a75-0c7ee0a2cdba\" (UID: \"8abc2e1e-de94-4880-8a75-0c7ee0a2cdba\") " Oct 02 11:18:48 crc kubenswrapper[4783]: I1002 11:18:48.672956 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-llrd6\" (UniqueName: \"kubernetes.io/projected/8abc2e1e-de94-4880-8a75-0c7ee0a2cdba-kube-api-access-llrd6\") pod \"8abc2e1e-de94-4880-8a75-0c7ee0a2cdba\" (UID: \"8abc2e1e-de94-4880-8a75-0c7ee0a2cdba\") " Oct 02 11:18:48 crc kubenswrapper[4783]: I1002 11:18:48.673123 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8abc2e1e-de94-4880-8a75-0c7ee0a2cdba-logs\") pod \"8abc2e1e-de94-4880-8a75-0c7ee0a2cdba\" (UID: \"8abc2e1e-de94-4880-8a75-0c7ee0a2cdba\") " Oct 02 11:18:48 crc kubenswrapper[4783]: I1002 11:18:48.673232 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/8abc2e1e-de94-4880-8a75-0c7ee0a2cdba-horizon-secret-key\") pod \"8abc2e1e-de94-4880-8a75-0c7ee0a2cdba\" (UID: \"8abc2e1e-de94-4880-8a75-0c7ee0a2cdba\") " Oct 02 11:18:48 crc kubenswrapper[4783]: I1002 11:18:48.673302 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8abc2e1e-de94-4880-8a75-0c7ee0a2cdba-scripts\") pod \"8abc2e1e-de94-4880-8a75-0c7ee0a2cdba\" (UID: \"8abc2e1e-de94-4880-8a75-0c7ee0a2cdba\") " Oct 02 11:18:48 crc kubenswrapper[4783]: I1002 11:18:48.673796 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8abc2e1e-de94-4880-8a75-0c7ee0a2cdba-logs" (OuterVolumeSpecName: "logs") pod "8abc2e1e-de94-4880-8a75-0c7ee0a2cdba" (UID: "8abc2e1e-de94-4880-8a75-0c7ee0a2cdba"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:18:48 crc kubenswrapper[4783]: I1002 11:18:48.673943 4783 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/8abc2e1e-de94-4880-8a75-0c7ee0a2cdba-logs\") on node \"crc\" DevicePath \"\"" Oct 02 11:18:48 crc kubenswrapper[4783]: I1002 11:18:48.678695 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8abc2e1e-de94-4880-8a75-0c7ee0a2cdba-horizon-secret-key" (OuterVolumeSpecName: "horizon-secret-key") pod "8abc2e1e-de94-4880-8a75-0c7ee0a2cdba" (UID: "8abc2e1e-de94-4880-8a75-0c7ee0a2cdba"). 
InnerVolumeSpecName "horizon-secret-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:18:48 crc kubenswrapper[4783]: I1002 11:18:48.679798 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8abc2e1e-de94-4880-8a75-0c7ee0a2cdba-kube-api-access-llrd6" (OuterVolumeSpecName: "kube-api-access-llrd6") pod "8abc2e1e-de94-4880-8a75-0c7ee0a2cdba" (UID: "8abc2e1e-de94-4880-8a75-0c7ee0a2cdba"). InnerVolumeSpecName "kube-api-access-llrd6". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:18:48 crc kubenswrapper[4783]: I1002 11:18:48.712078 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8abc2e1e-de94-4880-8a75-0c7ee0a2cdba-scripts" (OuterVolumeSpecName: "scripts") pod "8abc2e1e-de94-4880-8a75-0c7ee0a2cdba" (UID: "8abc2e1e-de94-4880-8a75-0c7ee0a2cdba"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:18:48 crc kubenswrapper[4783]: I1002 11:18:48.725190 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8abc2e1e-de94-4880-8a75-0c7ee0a2cdba-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8abc2e1e-de94-4880-8a75-0c7ee0a2cdba" (UID: "8abc2e1e-de94-4880-8a75-0c7ee0a2cdba"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:18:48 crc kubenswrapper[4783]: I1002 11:18:48.731714 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8abc2e1e-de94-4880-8a75-0c7ee0a2cdba-config-data" (OuterVolumeSpecName: "config-data") pod "8abc2e1e-de94-4880-8a75-0c7ee0a2cdba" (UID: "8abc2e1e-de94-4880-8a75-0c7ee0a2cdba"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:18:48 crc kubenswrapper[4783]: I1002 11:18:48.739155 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8abc2e1e-de94-4880-8a75-0c7ee0a2cdba-horizon-tls-certs" (OuterVolumeSpecName: "horizon-tls-certs") pod "8abc2e1e-de94-4880-8a75-0c7ee0a2cdba" (UID: "8abc2e1e-de94-4880-8a75-0c7ee0a2cdba"). InnerVolumeSpecName "horizon-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:18:48 crc kubenswrapper[4783]: I1002 11:18:48.775338 4783 reconciler_common.go:293] "Volume detached for volume \"horizon-secret-key\" (UniqueName: \"kubernetes.io/secret/8abc2e1e-de94-4880-8a75-0c7ee0a2cdba-horizon-secret-key\") on node \"crc\" DevicePath \"\"" Oct 02 11:18:48 crc kubenswrapper[4783]: I1002 11:18:48.775373 4783 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/8abc2e1e-de94-4880-8a75-0c7ee0a2cdba-scripts\") on node \"crc\" DevicePath \"\"" Oct 02 11:18:48 crc kubenswrapper[4783]: I1002 11:18:48.775385 4783 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8abc2e1e-de94-4880-8a75-0c7ee0a2cdba-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 11:18:48 crc kubenswrapper[4783]: I1002 11:18:48.775396 4783 reconciler_common.go:293] "Volume detached for volume \"horizon-tls-certs\" (UniqueName: \"kubernetes.io/secret/8abc2e1e-de94-4880-8a75-0c7ee0a2cdba-horizon-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 02 11:18:48 crc kubenswrapper[4783]: I1002 11:18:48.775427 4783 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/8abc2e1e-de94-4880-8a75-0c7ee0a2cdba-config-data\") on node \"crc\" DevicePath \"\"" Oct 02 11:18:48 crc kubenswrapper[4783]: I1002 11:18:48.775440 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-llrd6\" (UniqueName: \"kubernetes.io/projected/8abc2e1e-de94-4880-8a75-0c7ee0a2cdba-kube-api-access-llrd6\") on node \"crc\" DevicePath \"\"" Oct 02 11:18:49 crc kubenswrapper[4783]: I1002 11:18:49.079532 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Oct 02 11:18:49 crc kubenswrapper[4783]: I1002 11:18:49.105463 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Oct 02 11:18:49 crc kubenswrapper[4783]: I1002 11:18:49.539560 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/horizon-5fcdf587dd-wvthh" Oct 02 11:18:49 crc kubenswrapper[4783]: I1002 11:18:49.581812 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Oct 02 11:18:49 crc kubenswrapper[4783]: I1002 11:18:49.592146 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/horizon-5fcdf587dd-wvthh"] Oct 02 11:18:49 crc kubenswrapper[4783]: I1002 11:18:49.615585 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/horizon-5fcdf587dd-wvthh"] Oct 02 11:18:49 crc kubenswrapper[4783]: I1002 11:18:49.781993 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-mgxlb"] Oct 02 11:18:49 crc kubenswrapper[4783]: E1002 11:18:49.783438 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8abc2e1e-de94-4880-8a75-0c7ee0a2cdba" containerName="horizon" Oct 02 11:18:49 crc kubenswrapper[4783]: I1002 11:18:49.783466 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="8abc2e1e-de94-4880-8a75-0c7ee0a2cdba" containerName="horizon" Oct 02 11:18:49 crc kubenswrapper[4783]: E1002 11:18:49.784331 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8abc2e1e-de94-4880-8a75-0c7ee0a2cdba" containerName="horizon" Oct 02 11:18:49 crc kubenswrapper[4783]: I1002 11:18:49.784352 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="8abc2e1e-de94-4880-8a75-0c7ee0a2cdba" containerName="horizon" Oct 02 11:18:49 crc kubenswrapper[4783]: E1002 11:18:49.784390 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8abc2e1e-de94-4880-8a75-0c7ee0a2cdba" containerName="horizon-log" Oct 02 11:18:49 crc kubenswrapper[4783]: I1002 11:18:49.784401 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="8abc2e1e-de94-4880-8a75-0c7ee0a2cdba" containerName="horizon-log" Oct 02 11:18:49 crc kubenswrapper[4783]: E1002 11:18:49.784427 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8abc2e1e-de94-4880-8a75-0c7ee0a2cdba" containerName="horizon" Oct 02 11:18:49 crc kubenswrapper[4783]: I1002 11:18:49.784434 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="8abc2e1e-de94-4880-8a75-0c7ee0a2cdba" containerName="horizon" Oct 02 11:18:49 crc kubenswrapper[4783]: E1002 11:18:49.784458 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8abc2e1e-de94-4880-8a75-0c7ee0a2cdba" containerName="horizon" Oct 02 11:18:49 crc kubenswrapper[4783]: I1002 11:18:49.784465 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="8abc2e1e-de94-4880-8a75-0c7ee0a2cdba" containerName="horizon" Oct 02 11:18:49 crc kubenswrapper[4783]: I1002 11:18:49.784744 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="8abc2e1e-de94-4880-8a75-0c7ee0a2cdba" containerName="horizon-log" Oct 02 11:18:49 crc kubenswrapper[4783]: I1002 11:18:49.784781 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="8abc2e1e-de94-4880-8a75-0c7ee0a2cdba" containerName="horizon" Oct 02 11:18:49 crc kubenswrapper[4783]: I1002 11:18:49.784802 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="8abc2e1e-de94-4880-8a75-0c7ee0a2cdba" containerName="horizon" Oct 02 11:18:49 crc kubenswrapper[4783]: I1002 11:18:49.784812 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="8abc2e1e-de94-4880-8a75-0c7ee0a2cdba" containerName="horizon" Oct 02 11:18:49 crc kubenswrapper[4783]: I1002 11:18:49.785654 4783 util.go:30] "No sandbox for pod can be found. 
Oct 02 11:18:49 crc kubenswrapper[4783]: I1002 11:18:49.785654 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-mgxlb"
Oct 02 11:18:49 crc kubenswrapper[4783]: I1002 11:18:49.792566 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts"
Oct 02 11:18:49 crc kubenswrapper[4783]: I1002 11:18:49.793019 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data"
Oct 02 11:18:49 crc kubenswrapper[4783]: I1002 11:18:49.807069 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-mgxlb"]
Oct 02 11:18:49 crc kubenswrapper[4783]: I1002 11:18:49.897891 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kdt5h\" (UniqueName: \"kubernetes.io/projected/82576b63-a496-439f-a656-f89a0dc00ab9-kube-api-access-kdt5h\") pod \"nova-cell1-cell-mapping-mgxlb\" (UID: \"82576b63-a496-439f-a656-f89a0dc00ab9\") " pod="openstack/nova-cell1-cell-mapping-mgxlb"
Oct 02 11:18:49 crc kubenswrapper[4783]: I1002 11:18:49.898025 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82576b63-a496-439f-a656-f89a0dc00ab9-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-mgxlb\" (UID: \"82576b63-a496-439f-a656-f89a0dc00ab9\") " pod="openstack/nova-cell1-cell-mapping-mgxlb"
Oct 02 11:18:49 crc kubenswrapper[4783]: I1002 11:18:49.898093 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82576b63-a496-439f-a656-f89a0dc00ab9-config-data\") pod \"nova-cell1-cell-mapping-mgxlb\" (UID: \"82576b63-a496-439f-a656-f89a0dc00ab9\") " pod="openstack/nova-cell1-cell-mapping-mgxlb"
Oct 02 11:18:49 crc kubenswrapper[4783]: I1002 11:18:49.898270 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/82576b63-a496-439f-a656-f89a0dc00ab9-scripts\") pod \"nova-cell1-cell-mapping-mgxlb\" (UID: \"82576b63-a496-439f-a656-f89a0dc00ab9\") " pod="openstack/nova-cell1-cell-mapping-mgxlb"
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.000431 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kdt5h\" (UniqueName: \"kubernetes.io/projected/82576b63-a496-439f-a656-f89a0dc00ab9-kube-api-access-kdt5h\") pod \"nova-cell1-cell-mapping-mgxlb\" (UID: \"82576b63-a496-439f-a656-f89a0dc00ab9\") " pod="openstack/nova-cell1-cell-mapping-mgxlb"
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.000515 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82576b63-a496-439f-a656-f89a0dc00ab9-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-mgxlb\" (UID: \"82576b63-a496-439f-a656-f89a0dc00ab9\") " pod="openstack/nova-cell1-cell-mapping-mgxlb"
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.000539 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82576b63-a496-439f-a656-f89a0dc00ab9-config-data\") pod \"nova-cell1-cell-mapping-mgxlb\" (UID: \"82576b63-a496-439f-a656-f89a0dc00ab9\") " pod="openstack/nova-cell1-cell-mapping-mgxlb"
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.000616 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/82576b63-a496-439f-a656-f89a0dc00ab9-scripts\") pod \"nova-cell1-cell-mapping-mgxlb\" (UID: \"82576b63-a496-439f-a656-f89a0dc00ab9\") " pod="openstack/nova-cell1-cell-mapping-mgxlb"
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.026264 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82576b63-a496-439f-a656-f89a0dc00ab9-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-mgxlb\" (UID: \"82576b63-a496-439f-a656-f89a0dc00ab9\") " pod="openstack/nova-cell1-cell-mapping-mgxlb"
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.030198 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82576b63-a496-439f-a656-f89a0dc00ab9-config-data\") pod \"nova-cell1-cell-mapping-mgxlb\" (UID: \"82576b63-a496-439f-a656-f89a0dc00ab9\") " pod="openstack/nova-cell1-cell-mapping-mgxlb"
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.065378 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kdt5h\" (UniqueName: \"kubernetes.io/projected/82576b63-a496-439f-a656-f89a0dc00ab9-kube-api-access-kdt5h\") pod \"nova-cell1-cell-mapping-mgxlb\" (UID: \"82576b63-a496-439f-a656-f89a0dc00ab9\") " pod="openstack/nova-cell1-cell-mapping-mgxlb"
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.069067 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/82576b63-a496-439f-a656-f89a0dc00ab9-scripts\") pod \"nova-cell1-cell-mapping-mgxlb\" (UID: \"82576b63-a496-439f-a656-f89a0dc00ab9\") " pod="openstack/nova-cell1-cell-mapping-mgxlb"
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.129033 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-mgxlb"
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.262856 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.410599 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d656e193-b275-4f25-a1f4-6be84d785a5c-combined-ca-bundle\") pod \"d656e193-b275-4f25-a1f4-6be84d785a5c\" (UID: \"d656e193-b275-4f25-a1f4-6be84d785a5c\") "
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.410654 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d656e193-b275-4f25-a1f4-6be84d785a5c-config-data\") pod \"d656e193-b275-4f25-a1f4-6be84d785a5c\" (UID: \"d656e193-b275-4f25-a1f4-6be84d785a5c\") "
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.410679 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d656e193-b275-4f25-a1f4-6be84d785a5c-logs\") pod \"d656e193-b275-4f25-a1f4-6be84d785a5c\" (UID: \"d656e193-b275-4f25-a1f4-6be84d785a5c\") "
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.410895 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ffp4j\" (UniqueName: \"kubernetes.io/projected/d656e193-b275-4f25-a1f4-6be84d785a5c-kube-api-access-ffp4j\") pod \"d656e193-b275-4f25-a1f4-6be84d785a5c\" (UID: \"d656e193-b275-4f25-a1f4-6be84d785a5c\") "
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.411965 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d656e193-b275-4f25-a1f4-6be84d785a5c-logs" (OuterVolumeSpecName: "logs") pod "d656e193-b275-4f25-a1f4-6be84d785a5c" (UID: "d656e193-b275-4f25-a1f4-6be84d785a5c"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.416854 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d656e193-b275-4f25-a1f4-6be84d785a5c-kube-api-access-ffp4j" (OuterVolumeSpecName: "kube-api-access-ffp4j") pod "d656e193-b275-4f25-a1f4-6be84d785a5c" (UID: "d656e193-b275-4f25-a1f4-6be84d785a5c"). InnerVolumeSpecName "kube-api-access-ffp4j". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.436646 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d656e193-b275-4f25-a1f4-6be84d785a5c-config-data" (OuterVolumeSpecName: "config-data") pod "d656e193-b275-4f25-a1f4-6be84d785a5c" (UID: "d656e193-b275-4f25-a1f4-6be84d785a5c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.470528 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d656e193-b275-4f25-a1f4-6be84d785a5c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d656e193-b275-4f25-a1f4-6be84d785a5c" (UID: "d656e193-b275-4f25-a1f4-6be84d785a5c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.514095 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ffp4j\" (UniqueName: \"kubernetes.io/projected/d656e193-b275-4f25-a1f4-6be84d785a5c-kube-api-access-ffp4j\") on node \"crc\" DevicePath \"\""
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.514148 4783 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d656e193-b275-4f25-a1f4-6be84d785a5c-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.514160 4783 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d656e193-b275-4f25-a1f4-6be84d785a5c-config-data\") on node \"crc\" DevicePath \"\""
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.514171 4783 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d656e193-b275-4f25-a1f4-6be84d785a5c-logs\") on node \"crc\" DevicePath \"\""
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.555723 4783 generic.go:334] "Generic (PLEG): container finished" podID="d656e193-b275-4f25-a1f4-6be84d785a5c" containerID="d836319d22643605a08a9f9c365f41d180e0d87c53345fe5f12fe78838fc454d" exitCode=0
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.556638 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.557478 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d656e193-b275-4f25-a1f4-6be84d785a5c","Type":"ContainerDied","Data":"d836319d22643605a08a9f9c365f41d180e0d87c53345fe5f12fe78838fc454d"}
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.557542 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d656e193-b275-4f25-a1f4-6be84d785a5c","Type":"ContainerDied","Data":"6e2471580f45314cddad5d3284220a15eccae40d5f814688ea755610b54e11a0"}
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.557565 4783 scope.go:117] "RemoveContainer" containerID="d836319d22643605a08a9f9c365f41d180e0d87c53345fe5f12fe78838fc454d"
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.593366 4783 scope.go:117] "RemoveContainer" containerID="5f3604143df308eaf6f606d2c580280f1e3aa130a25603104e90eace90b8e9df"
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.625481 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.630776 4783 scope.go:117] "RemoveContainer" containerID="d836319d22643605a08a9f9c365f41d180e0d87c53345fe5f12fe78838fc454d"
Oct 02 11:18:50 crc kubenswrapper[4783]: E1002 11:18:50.632106 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d836319d22643605a08a9f9c365f41d180e0d87c53345fe5f12fe78838fc454d\": container with ID starting with d836319d22643605a08a9f9c365f41d180e0d87c53345fe5f12fe78838fc454d not found: ID does not exist" containerID="d836319d22643605a08a9f9c365f41d180e0d87c53345fe5f12fe78838fc454d"
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.632140 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d836319d22643605a08a9f9c365f41d180e0d87c53345fe5f12fe78838fc454d"} err="failed to get container status \"d836319d22643605a08a9f9c365f41d180e0d87c53345fe5f12fe78838fc454d\": rpc error: code = NotFound desc = could not find container \"d836319d22643605a08a9f9c365f41d180e0d87c53345fe5f12fe78838fc454d\": container with ID starting with d836319d22643605a08a9f9c365f41d180e0d87c53345fe5f12fe78838fc454d not found: ID does not exist"
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.632164 4783 scope.go:117] "RemoveContainer" containerID="5f3604143df308eaf6f606d2c580280f1e3aa130a25603104e90eace90b8e9df"
Oct 02 11:18:50 crc kubenswrapper[4783]: E1002 11:18:50.632376 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5f3604143df308eaf6f606d2c580280f1e3aa130a25603104e90eace90b8e9df\": container with ID starting with 5f3604143df308eaf6f606d2c580280f1e3aa130a25603104e90eace90b8e9df not found: ID does not exist" containerID="5f3604143df308eaf6f606d2c580280f1e3aa130a25603104e90eace90b8e9df"
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.632420 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5f3604143df308eaf6f606d2c580280f1e3aa130a25603104e90eace90b8e9df"} err="failed to get container status \"5f3604143df308eaf6f606d2c580280f1e3aa130a25603104e90eace90b8e9df\": rpc error: code = NotFound desc = could not find container \"5f3604143df308eaf6f606d2c580280f1e3aa130a25603104e90eace90b8e9df\": container with ID starting with 5f3604143df308eaf6f606d2c580280f1e3aa130a25603104e90eace90b8e9df not found: ID does not exist"
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.662164 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"]
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.674203 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Oct 02 11:18:50 crc kubenswrapper[4783]: E1002 11:18:50.674760 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d656e193-b275-4f25-a1f4-6be84d785a5c" containerName="nova-api-api"
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.674779 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="d656e193-b275-4f25-a1f4-6be84d785a5c" containerName="nova-api-api"
Oct 02 11:18:50 crc kubenswrapper[4783]: E1002 11:18:50.674795 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d656e193-b275-4f25-a1f4-6be84d785a5c" containerName="nova-api-log"
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.674804 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="d656e193-b275-4f25-a1f4-6be84d785a5c" containerName="nova-api-log"
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.675018 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="d656e193-b275-4f25-a1f4-6be84d785a5c" containerName="nova-api-log"
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.675038 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="d656e193-b275-4f25-a1f4-6be84d785a5c" containerName="nova-api-api"
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.675051 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="8abc2e1e-de94-4880-8a75-0c7ee0a2cdba" containerName="horizon"
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.676070 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.678588 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc"
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.678969 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc"
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.679260 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.683970 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.767979 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-mgxlb"]
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.818936 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zszdt\" (UniqueName: \"kubernetes.io/projected/b78ec37f-0100-49cd-a812-d7a6566d656f-kube-api-access-zszdt\") pod \"nova-api-0\" (UID: \"b78ec37f-0100-49cd-a812-d7a6566d656f\") " pod="openstack/nova-api-0"
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.819181 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b78ec37f-0100-49cd-a812-d7a6566d656f-logs\") pod \"nova-api-0\" (UID: \"b78ec37f-0100-49cd-a812-d7a6566d656f\") " pod="openstack/nova-api-0"
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.819210 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b78ec37f-0100-49cd-a812-d7a6566d656f-public-tls-certs\") pod \"nova-api-0\" (UID: \"b78ec37f-0100-49cd-a812-d7a6566d656f\") " pod="openstack/nova-api-0"
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.819258 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b78ec37f-0100-49cd-a812-d7a6566d656f-internal-tls-certs\") pod \"nova-api-0\" (UID: \"b78ec37f-0100-49cd-a812-d7a6566d656f\") " pod="openstack/nova-api-0"
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.819281 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b78ec37f-0100-49cd-a812-d7a6566d656f-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b78ec37f-0100-49cd-a812-d7a6566d656f\") " pod="openstack/nova-api-0"
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.819317 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b78ec37f-0100-49cd-a812-d7a6566d656f-config-data\") pod \"nova-api-0\" (UID: \"b78ec37f-0100-49cd-a812-d7a6566d656f\") " pod="openstack/nova-api-0"
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.921557 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b78ec37f-0100-49cd-a812-d7a6566d656f-internal-tls-certs\") pod \"nova-api-0\" (UID: \"b78ec37f-0100-49cd-a812-d7a6566d656f\") " pod="openstack/nova-api-0"
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.921607 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b78ec37f-0100-49cd-a812-d7a6566d656f-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b78ec37f-0100-49cd-a812-d7a6566d656f\") " pod="openstack/nova-api-0"
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.921651 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b78ec37f-0100-49cd-a812-d7a6566d656f-config-data\") pod \"nova-api-0\" (UID: \"b78ec37f-0100-49cd-a812-d7a6566d656f\") " pod="openstack/nova-api-0"
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.921710 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zszdt\" (UniqueName: \"kubernetes.io/projected/b78ec37f-0100-49cd-a812-d7a6566d656f-kube-api-access-zszdt\") pod \"nova-api-0\" (UID: \"b78ec37f-0100-49cd-a812-d7a6566d656f\") " pod="openstack/nova-api-0"
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.921764 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b78ec37f-0100-49cd-a812-d7a6566d656f-logs\") pod \"nova-api-0\" (UID: \"b78ec37f-0100-49cd-a812-d7a6566d656f\") " pod="openstack/nova-api-0"
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.921790 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b78ec37f-0100-49cd-a812-d7a6566d656f-public-tls-certs\") pod \"nova-api-0\" (UID: \"b78ec37f-0100-49cd-a812-d7a6566d656f\") " pod="openstack/nova-api-0"
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.922499 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b78ec37f-0100-49cd-a812-d7a6566d656f-logs\") pod \"nova-api-0\" (UID: \"b78ec37f-0100-49cd-a812-d7a6566d656f\") " pod="openstack/nova-api-0"
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.925813 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b78ec37f-0100-49cd-a812-d7a6566d656f-public-tls-certs\") pod \"nova-api-0\" (UID: \"b78ec37f-0100-49cd-a812-d7a6566d656f\") " pod="openstack/nova-api-0"
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.925950 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b78ec37f-0100-49cd-a812-d7a6566d656f-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"b78ec37f-0100-49cd-a812-d7a6566d656f\") " pod="openstack/nova-api-0"
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.929082 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b78ec37f-0100-49cd-a812-d7a6566d656f-config-data\") pod \"nova-api-0\" (UID: \"b78ec37f-0100-49cd-a812-d7a6566d656f\") " pod="openstack/nova-api-0"
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.938388 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b78ec37f-0100-49cd-a812-d7a6566d656f-internal-tls-certs\") pod \"nova-api-0\" (UID: \"b78ec37f-0100-49cd-a812-d7a6566d656f\") " pod="openstack/nova-api-0"
Oct 02 11:18:50 crc kubenswrapper[4783]: I1002 11:18:50.939622 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zszdt\" (UniqueName: \"kubernetes.io/projected/b78ec37f-0100-49cd-a812-d7a6566d656f-kube-api-access-zszdt\") pod \"nova-api-0\" (UID: \"b78ec37f-0100-49cd-a812-d7a6566d656f\") " pod="openstack/nova-api-0"
Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.059910 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.467176 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.517905 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.517970 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.518024 4783 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt"
Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.519548 4783 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a4f4b50638a10c4b177b4afe187ab7d697078645bdbd553938b22e713769fb04"} pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.519630 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" containerID="cri-o://a4f4b50638a10c4b177b4afe187ab7d697078645bdbd553938b22e713769fb04" gracePeriod=600
Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.583294 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8abc2e1e-de94-4880-8a75-0c7ee0a2cdba" path="/var/lib/kubelet/pods/8abc2e1e-de94-4880-8a75-0c7ee0a2cdba/volumes"
Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.584236 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d656e193-b275-4f25-a1f4-6be84d785a5c" path="/var/lib/kubelet/pods/d656e193-b275-4f25-a1f4-6be84d785a5c/volumes"
Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.591998 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.615539 4783 generic.go:334] "Generic (PLEG): container finished" podID="cbdb97bd-c226-45ae-943c-04c8530c6df7" containerID="28fa8af072b1f7d22cf969668e74f0dfb9dff62c4d9029c6157e411f3442d715" exitCode=0
Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.615608 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cbdb97bd-c226-45ae-943c-04c8530c6df7","Type":"ContainerDied","Data":"28fa8af072b1f7d22cf969668e74f0dfb9dff62c4d9029c6157e411f3442d715"}
Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.615637 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"cbdb97bd-c226-45ae-943c-04c8530c6df7","Type":"ContainerDied","Data":"d796b59aeddd54b047d26608f99fa4522059abf8ec5c7d14010ed88d9f70669d"}
Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.615653 4783 scope.go:117] "RemoveContainer" containerID="2ab006276d64b11347c490641d4ac97d0ea3884801ab5dd5c13fed327a33c761"
Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.615651 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.621599 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-mgxlb" event={"ID":"82576b63-a496-439f-a656-f89a0dc00ab9","Type":"ContainerStarted","Data":"ca2acf093e1a10a32251148a745d912fd1e659a9eb64dd8fa37e8064ca23c6c3"}
Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.621644 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-mgxlb" event={"ID":"82576b63-a496-439f-a656-f89a0dc00ab9","Type":"ContainerStarted","Data":"531d024be688cb3ebc8b417788c8e8deb543583e435e6b53557bbae21bef6e2a"}
Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.644893 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/cbdb97bd-c226-45ae-943c-04c8530c6df7-ceilometer-tls-certs\") pod \"cbdb97bd-c226-45ae-943c-04c8530c6df7\" (UID: \"cbdb97bd-c226-45ae-943c-04c8530c6df7\") "
Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.644943 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cbdb97bd-c226-45ae-943c-04c8530c6df7-combined-ca-bundle\") pod \"cbdb97bd-c226-45ae-943c-04c8530c6df7\" (UID: \"cbdb97bd-c226-45ae-943c-04c8530c6df7\") "
Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.645262 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cbdb97bd-c226-45ae-943c-04c8530c6df7-run-httpd\") pod \"cbdb97bd-c226-45ae-943c-04c8530c6df7\" (UID: \"cbdb97bd-c226-45ae-943c-04c8530c6df7\") "
Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.645299 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/cbdb97bd-c226-45ae-943c-04c8530c6df7-sg-core-conf-yaml\") pod \"cbdb97bd-c226-45ae-943c-04c8530c6df7\" (UID: \"cbdb97bd-c226-45ae-943c-04c8530c6df7\") "
Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.645392 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cbdb97bd-c226-45ae-943c-04c8530c6df7-log-httpd\") pod \"cbdb97bd-c226-45ae-943c-04c8530c6df7\" (UID: \"cbdb97bd-c226-45ae-943c-04c8530c6df7\") "
Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.645472 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cbdb97bd-c226-45ae-943c-04c8530c6df7-config-data\") pod \"cbdb97bd-c226-45ae-943c-04c8530c6df7\" (UID: \"cbdb97bd-c226-45ae-943c-04c8530c6df7\") "
Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.645506 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wpsfd\" (UniqueName: \"kubernetes.io/projected/cbdb97bd-c226-45ae-943c-04c8530c6df7-kube-api-access-wpsfd\") pod \"cbdb97bd-c226-45ae-943c-04c8530c6df7\" (UID: \"cbdb97bd-c226-45ae-943c-04c8530c6df7\") "
Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.645555 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cbdb97bd-c226-45ae-943c-04c8530c6df7-scripts\") pod \"cbdb97bd-c226-45ae-943c-04c8530c6df7\" (UID: \"cbdb97bd-c226-45ae-943c-04c8530c6df7\") "
Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.648701 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cbdb97bd-c226-45ae-943c-04c8530c6df7-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "cbdb97bd-c226-45ae-943c-04c8530c6df7" (UID: "cbdb97bd-c226-45ae-943c-04c8530c6df7"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.649296 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cbdb97bd-c226-45ae-943c-04c8530c6df7-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "cbdb97bd-c226-45ae-943c-04c8530c6df7" (UID: "cbdb97bd-c226-45ae-943c-04c8530c6df7"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.670884 4783 scope.go:117] "RemoveContainer" containerID="21a8e4d205753ec7a4e72db7455eb1291dc78ada382061fd5846bd39687a9d09"
Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.674749 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cbdb97bd-c226-45ae-943c-04c8530c6df7-kube-api-access-wpsfd" (OuterVolumeSpecName: "kube-api-access-wpsfd") pod "cbdb97bd-c226-45ae-943c-04c8530c6df7" (UID: "cbdb97bd-c226-45ae-943c-04c8530c6df7"). InnerVolumeSpecName "kube-api-access-wpsfd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.675562 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-mgxlb" podStartSLOduration=2.6755449049999998 podStartE2EDuration="2.675544905s" podCreationTimestamp="2025-10-02 11:18:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:18:51.637654367 +0000 UTC m=+1564.953848628" watchObservedRunningTime="2025-10-02 11:18:51.675544905 +0000 UTC m=+1564.991739166"
Oct 02 11:18:51 crc kubenswrapper[4783]: E1002 11:18:51.677811 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.679232 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cbdb97bd-c226-45ae-943c-04c8530c6df7-scripts" (OuterVolumeSpecName: "scripts") pod "cbdb97bd-c226-45ae-943c-04c8530c6df7" (UID: "cbdb97bd-c226-45ae-943c-04c8530c6df7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.697177 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cbdb97bd-c226-45ae-943c-04c8530c6df7-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "cbdb97bd-c226-45ae-943c-04c8530c6df7" (UID: "cbdb97bd-c226-45ae-943c-04c8530c6df7"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.714897 4783 scope.go:117] "RemoveContainer" containerID="28fa8af072b1f7d22cf969668e74f0dfb9dff62c4d9029c6157e411f3442d715"
Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.737714 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cbdb97bd-c226-45ae-943c-04c8530c6df7-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "cbdb97bd-c226-45ae-943c-04c8530c6df7" (UID: "cbdb97bd-c226-45ae-943c-04c8530c6df7"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.749271 4783 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/cbdb97bd-c226-45ae-943c-04c8530c6df7-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.749295 4783 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cbdb97bd-c226-45ae-943c-04c8530c6df7-log-httpd\") on node \"crc\" DevicePath \"\""
Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.749304 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wpsfd\" (UniqueName: \"kubernetes.io/projected/cbdb97bd-c226-45ae-943c-04c8530c6df7-kube-api-access-wpsfd\") on node \"crc\" DevicePath \"\""
Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.749314 4783 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/cbdb97bd-c226-45ae-943c-04c8530c6df7-scripts\") on node \"crc\" DevicePath \"\""
Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.749322 4783 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/cbdb97bd-c226-45ae-943c-04c8530c6df7-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\""
Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.749330 4783 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/cbdb97bd-c226-45ae-943c-04c8530c6df7-run-httpd\") on node \"crc\" DevicePath \"\""
Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.758575 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cbdb97bd-c226-45ae-943c-04c8530c6df7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "cbdb97bd-c226-45ae-943c-04c8530c6df7" (UID: "cbdb97bd-c226-45ae-943c-04c8530c6df7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.851234 4783 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/cbdb97bd-c226-45ae-943c-04c8530c6df7-config-data\") on node \"crc\" DevicePath \"\"" Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.851378 4783 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/cbdb97bd-c226-45ae-943c-04c8530c6df7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.912752 4783 scope.go:117] "RemoveContainer" containerID="8bbebfad567d89c00f9f3676e1c85972121d186b70d32723c247a278546fa121" Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.936988 4783 scope.go:117] "RemoveContainer" containerID="2ab006276d64b11347c490641d4ac97d0ea3884801ab5dd5c13fed327a33c761" Oct 02 11:18:51 crc kubenswrapper[4783]: E1002 11:18:51.937369 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2ab006276d64b11347c490641d4ac97d0ea3884801ab5dd5c13fed327a33c761\": container with ID starting with 2ab006276d64b11347c490641d4ac97d0ea3884801ab5dd5c13fed327a33c761 not found: ID does not exist" containerID="2ab006276d64b11347c490641d4ac97d0ea3884801ab5dd5c13fed327a33c761" Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.937426 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2ab006276d64b11347c490641d4ac97d0ea3884801ab5dd5c13fed327a33c761"} err="failed to get container status \"2ab006276d64b11347c490641d4ac97d0ea3884801ab5dd5c13fed327a33c761\": rpc error: code = NotFound desc = could not find container \"2ab006276d64b11347c490641d4ac97d0ea3884801ab5dd5c13fed327a33c761\": container with ID starting with 2ab006276d64b11347c490641d4ac97d0ea3884801ab5dd5c13fed327a33c761 not found: ID does not exist" Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.937455 4783 scope.go:117] "RemoveContainer" containerID="21a8e4d205753ec7a4e72db7455eb1291dc78ada382061fd5846bd39687a9d09" Oct 02 11:18:51 crc kubenswrapper[4783]: E1002 11:18:51.937786 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"21a8e4d205753ec7a4e72db7455eb1291dc78ada382061fd5846bd39687a9d09\": container with ID starting with 21a8e4d205753ec7a4e72db7455eb1291dc78ada382061fd5846bd39687a9d09 not found: ID does not exist" containerID="21a8e4d205753ec7a4e72db7455eb1291dc78ada382061fd5846bd39687a9d09" Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.937849 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"21a8e4d205753ec7a4e72db7455eb1291dc78ada382061fd5846bd39687a9d09"} err="failed to get container status \"21a8e4d205753ec7a4e72db7455eb1291dc78ada382061fd5846bd39687a9d09\": rpc error: code = NotFound desc = could not find container \"21a8e4d205753ec7a4e72db7455eb1291dc78ada382061fd5846bd39687a9d09\": container with ID starting with 21a8e4d205753ec7a4e72db7455eb1291dc78ada382061fd5846bd39687a9d09 not found: ID does not exist" Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.937870 4783 scope.go:117] "RemoveContainer" containerID="28fa8af072b1f7d22cf969668e74f0dfb9dff62c4d9029c6157e411f3442d715" Oct 02 11:18:51 crc kubenswrapper[4783]: E1002 11:18:51.943468 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc 
error: code = NotFound desc = could not find container \"28fa8af072b1f7d22cf969668e74f0dfb9dff62c4d9029c6157e411f3442d715\": container with ID starting with 28fa8af072b1f7d22cf969668e74f0dfb9dff62c4d9029c6157e411f3442d715 not found: ID does not exist" containerID="28fa8af072b1f7d22cf969668e74f0dfb9dff62c4d9029c6157e411f3442d715" Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.943516 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"28fa8af072b1f7d22cf969668e74f0dfb9dff62c4d9029c6157e411f3442d715"} err="failed to get container status \"28fa8af072b1f7d22cf969668e74f0dfb9dff62c4d9029c6157e411f3442d715\": rpc error: code = NotFound desc = could not find container \"28fa8af072b1f7d22cf969668e74f0dfb9dff62c4d9029c6157e411f3442d715\": container with ID starting with 28fa8af072b1f7d22cf969668e74f0dfb9dff62c4d9029c6157e411f3442d715 not found: ID does not exist" Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.943547 4783 scope.go:117] "RemoveContainer" containerID="8bbebfad567d89c00f9f3676e1c85972121d186b70d32723c247a278546fa121" Oct 02 11:18:51 crc kubenswrapper[4783]: E1002 11:18:51.945349 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8bbebfad567d89c00f9f3676e1c85972121d186b70d32723c247a278546fa121\": container with ID starting with 8bbebfad567d89c00f9f3676e1c85972121d186b70d32723c247a278546fa121 not found: ID does not exist" containerID="8bbebfad567d89c00f9f3676e1c85972121d186b70d32723c247a278546fa121" Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.945386 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8bbebfad567d89c00f9f3676e1c85972121d186b70d32723c247a278546fa121"} err="failed to get container status \"8bbebfad567d89c00f9f3676e1c85972121d186b70d32723c247a278546fa121\": rpc error: code = NotFound desc = could not find container \"8bbebfad567d89c00f9f3676e1c85972121d186b70d32723c247a278546fa121\": container with ID starting with 8bbebfad567d89c00f9f3676e1c85972121d186b70d32723c247a278546fa121 not found: ID does not exist" Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.952969 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.962776 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.975399 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Oct 02 11:18:51 crc kubenswrapper[4783]: E1002 11:18:51.975883 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cbdb97bd-c226-45ae-943c-04c8530c6df7" containerName="proxy-httpd" Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.975911 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="cbdb97bd-c226-45ae-943c-04c8530c6df7" containerName="proxy-httpd" Oct 02 11:18:51 crc kubenswrapper[4783]: E1002 11:18:51.975932 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cbdb97bd-c226-45ae-943c-04c8530c6df7" containerName="ceilometer-notification-agent" Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.975941 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="cbdb97bd-c226-45ae-943c-04c8530c6df7" containerName="ceilometer-notification-agent" Oct 02 11:18:51 crc kubenswrapper[4783]: E1002 11:18:51.975966 4783 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="cbdb97bd-c226-45ae-943c-04c8530c6df7" containerName="ceilometer-central-agent" Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.975974 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="cbdb97bd-c226-45ae-943c-04c8530c6df7" containerName="ceilometer-central-agent" Oct 02 11:18:51 crc kubenswrapper[4783]: E1002 11:18:51.975996 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cbdb97bd-c226-45ae-943c-04c8530c6df7" containerName="sg-core" Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.976003 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="cbdb97bd-c226-45ae-943c-04c8530c6df7" containerName="sg-core" Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.976228 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="cbdb97bd-c226-45ae-943c-04c8530c6df7" containerName="ceilometer-notification-agent" Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.976257 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="cbdb97bd-c226-45ae-943c-04c8530c6df7" containerName="sg-core" Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.976272 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="cbdb97bd-c226-45ae-943c-04c8530c6df7" containerName="ceilometer-central-agent" Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.976285 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="cbdb97bd-c226-45ae-943c-04c8530c6df7" containerName="proxy-httpd" Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.983123 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.986947 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.987153 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.987306 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Oct 02 11:18:51 crc kubenswrapper[4783]: I1002 11:18:51.998616 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 02 11:18:52 crc kubenswrapper[4783]: I1002 11:18:52.158756 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd-scripts\") pod \"ceilometer-0\" (UID: \"fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd\") " pod="openstack/ceilometer-0" Oct 02 11:18:52 crc kubenswrapper[4783]: I1002 11:18:52.158842 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd-run-httpd\") pod \"ceilometer-0\" (UID: \"fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd\") " pod="openstack/ceilometer-0" Oct 02 11:18:52 crc kubenswrapper[4783]: I1002 11:18:52.158878 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd\") " pod="openstack/ceilometer-0" Oct 02 11:18:52 crc kubenswrapper[4783]: I1002 11:18:52.158916 4783 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w985n\" (UniqueName: \"kubernetes.io/projected/fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd-kube-api-access-w985n\") pod \"ceilometer-0\" (UID: \"fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd\") " pod="openstack/ceilometer-0" Oct 02 11:18:52 crc kubenswrapper[4783]: I1002 11:18:52.158967 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd\") " pod="openstack/ceilometer-0" Oct 02 11:18:52 crc kubenswrapper[4783]: I1002 11:18:52.158988 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd-log-httpd\") pod \"ceilometer-0\" (UID: \"fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd\") " pod="openstack/ceilometer-0" Oct 02 11:18:52 crc kubenswrapper[4783]: I1002 11:18:52.159440 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd\") " pod="openstack/ceilometer-0" Oct 02 11:18:52 crc kubenswrapper[4783]: I1002 11:18:52.159488 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd-config-data\") pod \"ceilometer-0\" (UID: \"fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd\") " pod="openstack/ceilometer-0" Oct 02 11:18:52 crc kubenswrapper[4783]: I1002 11:18:52.262232 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd\") " pod="openstack/ceilometer-0" Oct 02 11:18:52 crc kubenswrapper[4783]: I1002 11:18:52.262791 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd-config-data\") pod \"ceilometer-0\" (UID: \"fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd\") " pod="openstack/ceilometer-0" Oct 02 11:18:52 crc kubenswrapper[4783]: I1002 11:18:52.262960 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd-scripts\") pod \"ceilometer-0\" (UID: \"fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd\") " pod="openstack/ceilometer-0" Oct 02 11:18:52 crc kubenswrapper[4783]: I1002 11:18:52.263034 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd-run-httpd\") pod \"ceilometer-0\" (UID: \"fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd\") " pod="openstack/ceilometer-0" Oct 02 11:18:52 crc kubenswrapper[4783]: I1002 11:18:52.263144 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd\") " 
pod="openstack/ceilometer-0" Oct 02 11:18:52 crc kubenswrapper[4783]: I1002 11:18:52.263347 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w985n\" (UniqueName: \"kubernetes.io/projected/fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd-kube-api-access-w985n\") pod \"ceilometer-0\" (UID: \"fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd\") " pod="openstack/ceilometer-0" Oct 02 11:18:52 crc kubenswrapper[4783]: I1002 11:18:52.263553 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd\") " pod="openstack/ceilometer-0" Oct 02 11:18:52 crc kubenswrapper[4783]: I1002 11:18:52.263646 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd-log-httpd\") pod \"ceilometer-0\" (UID: \"fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd\") " pod="openstack/ceilometer-0" Oct 02 11:18:52 crc kubenswrapper[4783]: I1002 11:18:52.264401 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd-log-httpd\") pod \"ceilometer-0\" (UID: \"fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd\") " pod="openstack/ceilometer-0" Oct 02 11:18:52 crc kubenswrapper[4783]: I1002 11:18:52.267818 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd-run-httpd\") pod \"ceilometer-0\" (UID: \"fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd\") " pod="openstack/ceilometer-0" Oct 02 11:18:52 crc kubenswrapper[4783]: I1002 11:18:52.270157 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd-scripts\") pod \"ceilometer-0\" (UID: \"fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd\") " pod="openstack/ceilometer-0" Oct 02 11:18:52 crc kubenswrapper[4783]: I1002 11:18:52.270203 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd\") " pod="openstack/ceilometer-0" Oct 02 11:18:52 crc kubenswrapper[4783]: I1002 11:18:52.271293 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd-config-data\") pod \"ceilometer-0\" (UID: \"fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd\") " pod="openstack/ceilometer-0" Oct 02 11:18:52 crc kubenswrapper[4783]: I1002 11:18:52.278100 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd\") " pod="openstack/ceilometer-0" Oct 02 11:18:52 crc kubenswrapper[4783]: I1002 11:18:52.279436 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd\") " pod="openstack/ceilometer-0" Oct 02 11:18:52 crc 
kubenswrapper[4783]: I1002 11:18:52.290580 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w985n\" (UniqueName: \"kubernetes.io/projected/fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd-kube-api-access-w985n\") pod \"ceilometer-0\" (UID: \"fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd\") " pod="openstack/ceilometer-0" Oct 02 11:18:52 crc kubenswrapper[4783]: I1002 11:18:52.314571 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Oct 02 11:18:52 crc kubenswrapper[4783]: I1002 11:18:52.648224 4783 generic.go:334] "Generic (PLEG): container finished" podID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerID="a4f4b50638a10c4b177b4afe187ab7d697078645bdbd553938b22e713769fb04" exitCode=0 Oct 02 11:18:52 crc kubenswrapper[4783]: I1002 11:18:52.648571 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" event={"ID":"3288cc82-59a8-408e-8b0e-b5255882b4fb","Type":"ContainerDied","Data":"a4f4b50638a10c4b177b4afe187ab7d697078645bdbd553938b22e713769fb04"} Oct 02 11:18:52 crc kubenswrapper[4783]: I1002 11:18:52.648605 4783 scope.go:117] "RemoveContainer" containerID="d68ca58a875615f7dd80b97789e236029261d5b31a2b176dd22b20723a10f851" Oct 02 11:18:52 crc kubenswrapper[4783]: I1002 11:18:52.649220 4783 scope.go:117] "RemoveContainer" containerID="a4f4b50638a10c4b177b4afe187ab7d697078645bdbd553938b22e713769fb04" Oct 02 11:18:52 crc kubenswrapper[4783]: E1002 11:18:52.649464 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:18:52 crc kubenswrapper[4783]: I1002 11:18:52.658004 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b78ec37f-0100-49cd-a812-d7a6566d656f","Type":"ContainerStarted","Data":"a2d6719a5da2934cc78a8c36ada520d54abcf4bc71b1ec279ccabb2730face6b"} Oct 02 11:18:52 crc kubenswrapper[4783]: I1002 11:18:52.658047 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b78ec37f-0100-49cd-a812-d7a6566d656f","Type":"ContainerStarted","Data":"80df88a4dfe442fb63bd389e19aea355128a1f21d3dc524dcc1bbca21efa2ad4"} Oct 02 11:18:52 crc kubenswrapper[4783]: I1002 11:18:52.658060 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b78ec37f-0100-49cd-a812-d7a6566d656f","Type":"ContainerStarted","Data":"c531be152a3be8ba16dcc44e37c595f13d4d95fa9fcf017e7ba2f5b8113b9f26"} Oct 02 11:18:52 crc kubenswrapper[4783]: I1002 11:18:52.687624 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.687600904 podStartE2EDuration="2.687600904s" podCreationTimestamp="2025-10-02 11:18:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:18:52.682847514 +0000 UTC m=+1565.999041775" watchObservedRunningTime="2025-10-02 11:18:52.687600904 +0000 UTC m=+1566.003795165" Oct 02 11:18:52 crc kubenswrapper[4783]: I1002 11:18:52.837821 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Oct 02 
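
The "Observed pod startup duration" entries report podStartSLOduration as observedRunningTime minus podCreationTimestamp. A quick check of the nova-api-0 numbers from the log, using only the standard library (timestamps copied from the entry above; parse errors ignored for brevity):

    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	created, _ := time.Parse("2006-01-02 15:04:05 -0700 MST",
    		"2025-10-02 11:18:50 +0000 UTC")
    	running, _ := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST",
    		"2025-10-02 11:18:52.687600904 +0000 UTC")
    	// Prints 2.687600904s, matching podStartSLOduration=2.687600904.
    	fmt.Println(running.Sub(created))
    }
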
11:18:52 crc kubenswrapper[4783]: W1002 11:18:52.842627 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfbc080e2_1a84_43b9_8c3c_d6ef8b25a8cd.slice/crio-44d00e0635a58135363bafbf6774f2c29a9676157464ac2ca1267039da1cd570 WatchSource:0}: Error finding container 44d00e0635a58135363bafbf6774f2c29a9676157464ac2ca1267039da1cd570: Status 404 returned error can't find the container with id 44d00e0635a58135363bafbf6774f2c29a9676157464ac2ca1267039da1cd570 Oct 02 11:18:52 crc kubenswrapper[4783]: I1002 11:18:52.881013 4783 scope.go:117] "RemoveContainer" containerID="16ae80fcb7cc46b1d02c66e78777fb52fb5d23f067fb092afec307bd0d8d1fa3" Oct 02 11:18:52 crc kubenswrapper[4783]: I1002 11:18:52.932766 4783 scope.go:117] "RemoveContainer" containerID="bcfcefc8facb8e60fce5bfc07ac844bba734127445fd330b70ac307d45ead251" Oct 02 11:18:52 crc kubenswrapper[4783]: I1002 11:18:52.996435 4783 scope.go:117] "RemoveContainer" containerID="9c29fed7b93014049297836339c82e8776595e7344232995cf55de7be2cdd265" Oct 02 11:18:53 crc kubenswrapper[4783]: I1002 11:18:53.564087 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cbdb97bd-c226-45ae-943c-04c8530c6df7" path="/var/lib/kubelet/pods/cbdb97bd-c226-45ae-943c-04c8530c6df7/volumes" Oct 02 11:18:53 crc kubenswrapper[4783]: I1002 11:18:53.674694 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd","Type":"ContainerStarted","Data":"2554d66e1647fc8d9cb82c099dcb9f08eb392fc3d579ad3fd8a461a4b1b46f20"} Oct 02 11:18:53 crc kubenswrapper[4783]: I1002 11:18:53.674749 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd","Type":"ContainerStarted","Data":"44d00e0635a58135363bafbf6774f2c29a9676157464ac2ca1267039da1cd570"} Oct 02 11:18:54 crc kubenswrapper[4783]: I1002 11:18:54.441653 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-59cf4bdb65-z5qhk" Oct 02 11:18:54 crc kubenswrapper[4783]: I1002 11:18:54.535258 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-845d6d6f59-76xxs"] Oct 02 11:18:54 crc kubenswrapper[4783]: I1002 11:18:54.535502 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-845d6d6f59-76xxs" podUID="ae944d77-12f5-47c5-90b3-916ff3ca9e91" containerName="dnsmasq-dns" containerID="cri-o://595f0b1a220c6fbd13f0518942708eeee16df3e0a5e1ed7cd30c570b14b0cf45" gracePeriod=10 Oct 02 11:18:54 crc kubenswrapper[4783]: I1002 11:18:54.702901 4783 generic.go:334] "Generic (PLEG): container finished" podID="ae944d77-12f5-47c5-90b3-916ff3ca9e91" containerID="595f0b1a220c6fbd13f0518942708eeee16df3e0a5e1ed7cd30c570b14b0cf45" exitCode=0 Oct 02 11:18:54 crc kubenswrapper[4783]: I1002 11:18:54.703100 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-845d6d6f59-76xxs" event={"ID":"ae944d77-12f5-47c5-90b3-916ff3ca9e91","Type":"ContainerDied","Data":"595f0b1a220c6fbd13f0518942708eeee16df3e0a5e1ed7cd30c570b14b0cf45"} Oct 02 11:18:54 crc kubenswrapper[4783]: I1002 11:18:54.715665 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd","Type":"ContainerStarted","Data":"2e1880b32250f892a40a4b57f048c7b6a28103084642dd2ad1db7570c7bf2df1"} Oct 02 11:18:55 crc kubenswrapper[4783]: I1002 11:18:55.119973 
4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-845d6d6f59-76xxs" Oct 02 11:18:55 crc kubenswrapper[4783]: I1002 11:18:55.250154 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ae944d77-12f5-47c5-90b3-916ff3ca9e91-dns-swift-storage-0\") pod \"ae944d77-12f5-47c5-90b3-916ff3ca9e91\" (UID: \"ae944d77-12f5-47c5-90b3-916ff3ca9e91\") " Oct 02 11:18:55 crc kubenswrapper[4783]: I1002 11:18:55.250599 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ae944d77-12f5-47c5-90b3-916ff3ca9e91-ovsdbserver-sb\") pod \"ae944d77-12f5-47c5-90b3-916ff3ca9e91\" (UID: \"ae944d77-12f5-47c5-90b3-916ff3ca9e91\") " Oct 02 11:18:55 crc kubenswrapper[4783]: I1002 11:18:55.250624 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h5cwk\" (UniqueName: \"kubernetes.io/projected/ae944d77-12f5-47c5-90b3-916ff3ca9e91-kube-api-access-h5cwk\") pod \"ae944d77-12f5-47c5-90b3-916ff3ca9e91\" (UID: \"ae944d77-12f5-47c5-90b3-916ff3ca9e91\") " Oct 02 11:18:55 crc kubenswrapper[4783]: I1002 11:18:55.251401 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ae944d77-12f5-47c5-90b3-916ff3ca9e91-config\") pod \"ae944d77-12f5-47c5-90b3-916ff3ca9e91\" (UID: \"ae944d77-12f5-47c5-90b3-916ff3ca9e91\") " Oct 02 11:18:55 crc kubenswrapper[4783]: I1002 11:18:55.251509 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ae944d77-12f5-47c5-90b3-916ff3ca9e91-dns-svc\") pod \"ae944d77-12f5-47c5-90b3-916ff3ca9e91\" (UID: \"ae944d77-12f5-47c5-90b3-916ff3ca9e91\") " Oct 02 11:18:55 crc kubenswrapper[4783]: I1002 11:18:55.251560 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ae944d77-12f5-47c5-90b3-916ff3ca9e91-ovsdbserver-nb\") pod \"ae944d77-12f5-47c5-90b3-916ff3ca9e91\" (UID: \"ae944d77-12f5-47c5-90b3-916ff3ca9e91\") " Oct 02 11:18:55 crc kubenswrapper[4783]: I1002 11:18:55.277612 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ae944d77-12f5-47c5-90b3-916ff3ca9e91-kube-api-access-h5cwk" (OuterVolumeSpecName: "kube-api-access-h5cwk") pod "ae944d77-12f5-47c5-90b3-916ff3ca9e91" (UID: "ae944d77-12f5-47c5-90b3-916ff3ca9e91"). InnerVolumeSpecName "kube-api-access-h5cwk". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:18:55 crc kubenswrapper[4783]: I1002 11:18:55.302139 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ae944d77-12f5-47c5-90b3-916ff3ca9e91-config" (OuterVolumeSpecName: "config") pod "ae944d77-12f5-47c5-90b3-916ff3ca9e91" (UID: "ae944d77-12f5-47c5-90b3-916ff3ca9e91"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:18:55 crc kubenswrapper[4783]: I1002 11:18:55.311356 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ae944d77-12f5-47c5-90b3-916ff3ca9e91-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "ae944d77-12f5-47c5-90b3-916ff3ca9e91" (UID: "ae944d77-12f5-47c5-90b3-916ff3ca9e91"). InnerVolumeSpecName "dns-swift-storage-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:18:55 crc kubenswrapper[4783]: I1002 11:18:55.333225 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ae944d77-12f5-47c5-90b3-916ff3ca9e91-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "ae944d77-12f5-47c5-90b3-916ff3ca9e91" (UID: "ae944d77-12f5-47c5-90b3-916ff3ca9e91"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:18:55 crc kubenswrapper[4783]: I1002 11:18:55.337864 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ae944d77-12f5-47c5-90b3-916ff3ca9e91-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "ae944d77-12f5-47c5-90b3-916ff3ca9e91" (UID: "ae944d77-12f5-47c5-90b3-916ff3ca9e91"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:18:55 crc kubenswrapper[4783]: I1002 11:18:55.354158 4783 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/ae944d77-12f5-47c5-90b3-916ff3ca9e91-dns-svc\") on node \"crc\" DevicePath \"\"" Oct 02 11:18:55 crc kubenswrapper[4783]: I1002 11:18:55.354371 4783 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/ae944d77-12f5-47c5-90b3-916ff3ca9e91-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Oct 02 11:18:55 crc kubenswrapper[4783]: I1002 11:18:55.354495 4783 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/ae944d77-12f5-47c5-90b3-916ff3ca9e91-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Oct 02 11:18:55 crc kubenswrapper[4783]: I1002 11:18:55.354560 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h5cwk\" (UniqueName: \"kubernetes.io/projected/ae944d77-12f5-47c5-90b3-916ff3ca9e91-kube-api-access-h5cwk\") on node \"crc\" DevicePath \"\"" Oct 02 11:18:55 crc kubenswrapper[4783]: I1002 11:18:55.354625 4783 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ae944d77-12f5-47c5-90b3-916ff3ca9e91-config\") on node \"crc\" DevicePath \"\"" Oct 02 11:18:55 crc kubenswrapper[4783]: I1002 11:18:55.371023 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ae944d77-12f5-47c5-90b3-916ff3ca9e91-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "ae944d77-12f5-47c5-90b3-916ff3ca9e91" (UID: "ae944d77-12f5-47c5-90b3-916ff3ca9e91"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:18:55 crc kubenswrapper[4783]: I1002 11:18:55.455907 4783 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/ae944d77-12f5-47c5-90b3-916ff3ca9e91-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Oct 02 11:18:55 crc kubenswrapper[4783]: I1002 11:18:55.726145 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-845d6d6f59-76xxs" Oct 02 11:18:55 crc kubenswrapper[4783]: I1002 11:18:55.726873 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-845d6d6f59-76xxs" event={"ID":"ae944d77-12f5-47c5-90b3-916ff3ca9e91","Type":"ContainerDied","Data":"cd6f6610aafb6cf9753d733b4416f8927401fcf04582630ecad1e966e9b35e2d"} Oct 02 11:18:55 crc kubenswrapper[4783]: I1002 11:18:55.726902 4783 scope.go:117] "RemoveContainer" containerID="595f0b1a220c6fbd13f0518942708eeee16df3e0a5e1ed7cd30c570b14b0cf45" Oct 02 11:18:55 crc kubenswrapper[4783]: I1002 11:18:55.734848 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd","Type":"ContainerStarted","Data":"53ce9691937c72f2e9ec7e436e863e7625656e1f600b49791c935b087add7f70"} Oct 02 11:18:55 crc kubenswrapper[4783]: I1002 11:18:55.787914 4783 scope.go:117] "RemoveContainer" containerID="0119868203a1cabafc90042406872130e7a3d5f651ae8564fc600f77af6f94be" Oct 02 11:18:55 crc kubenswrapper[4783]: I1002 11:18:55.799753 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-845d6d6f59-76xxs"] Oct 02 11:18:55 crc kubenswrapper[4783]: I1002 11:18:55.810453 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-845d6d6f59-76xxs"] Oct 02 11:18:56 crc kubenswrapper[4783]: I1002 11:18:56.745754 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd","Type":"ContainerStarted","Data":"d2f60b627c949aca97f5d507223690020e339c55f05263463b5db708705a1bc8"} Oct 02 11:18:56 crc kubenswrapper[4783]: I1002 11:18:56.746037 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Oct 02 11:18:56 crc kubenswrapper[4783]: I1002 11:18:56.763998 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.468764875 podStartE2EDuration="5.763980195s" podCreationTimestamp="2025-10-02 11:18:51 +0000 UTC" firstStartedPulling="2025-10-02 11:18:52.84564479 +0000 UTC m=+1566.161839041" lastFinishedPulling="2025-10-02 11:18:56.1408601 +0000 UTC m=+1569.457054361" observedRunningTime="2025-10-02 11:18:56.762828133 +0000 UTC m=+1570.079022394" watchObservedRunningTime="2025-10-02 11:18:56.763980195 +0000 UTC m=+1570.080174456" Oct 02 11:18:57 crc kubenswrapper[4783]: I1002 11:18:57.560365 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ae944d77-12f5-47c5-90b3-916ff3ca9e91" path="/var/lib/kubelet/pods/ae944d77-12f5-47c5-90b3-916ff3ca9e91/volumes" Oct 02 11:18:57 crc kubenswrapper[4783]: I1002 11:18:57.754478 4783 generic.go:334] "Generic (PLEG): container finished" podID="82576b63-a496-439f-a656-f89a0dc00ab9" containerID="ca2acf093e1a10a32251148a745d912fd1e659a9eb64dd8fa37e8064ca23c6c3" exitCode=0 Oct 02 11:18:57 crc kubenswrapper[4783]: I1002 11:18:57.755307 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-mgxlb" event={"ID":"82576b63-a496-439f-a656-f89a0dc00ab9","Type":"ContainerDied","Data":"ca2acf093e1a10a32251148a745d912fd1e659a9eb64dd8fa37e8064ca23c6c3"} Oct 02 11:18:59 crc kubenswrapper[4783]: I1002 11:18:59.118016 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-mgxlb" Oct 02 11:18:59 crc kubenswrapper[4783]: I1002 11:18:59.224719 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82576b63-a496-439f-a656-f89a0dc00ab9-combined-ca-bundle\") pod \"82576b63-a496-439f-a656-f89a0dc00ab9\" (UID: \"82576b63-a496-439f-a656-f89a0dc00ab9\") " Oct 02 11:18:59 crc kubenswrapper[4783]: I1002 11:18:59.224818 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kdt5h\" (UniqueName: \"kubernetes.io/projected/82576b63-a496-439f-a656-f89a0dc00ab9-kube-api-access-kdt5h\") pod \"82576b63-a496-439f-a656-f89a0dc00ab9\" (UID: \"82576b63-a496-439f-a656-f89a0dc00ab9\") " Oct 02 11:18:59 crc kubenswrapper[4783]: I1002 11:18:59.224849 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/82576b63-a496-439f-a656-f89a0dc00ab9-scripts\") pod \"82576b63-a496-439f-a656-f89a0dc00ab9\" (UID: \"82576b63-a496-439f-a656-f89a0dc00ab9\") " Oct 02 11:18:59 crc kubenswrapper[4783]: I1002 11:18:59.225149 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82576b63-a496-439f-a656-f89a0dc00ab9-config-data\") pod \"82576b63-a496-439f-a656-f89a0dc00ab9\" (UID: \"82576b63-a496-439f-a656-f89a0dc00ab9\") " Oct 02 11:18:59 crc kubenswrapper[4783]: I1002 11:18:59.232460 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/82576b63-a496-439f-a656-f89a0dc00ab9-kube-api-access-kdt5h" (OuterVolumeSpecName: "kube-api-access-kdt5h") pod "82576b63-a496-439f-a656-f89a0dc00ab9" (UID: "82576b63-a496-439f-a656-f89a0dc00ab9"). InnerVolumeSpecName "kube-api-access-kdt5h". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:18:59 crc kubenswrapper[4783]: I1002 11:18:59.232599 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/82576b63-a496-439f-a656-f89a0dc00ab9-scripts" (OuterVolumeSpecName: "scripts") pod "82576b63-a496-439f-a656-f89a0dc00ab9" (UID: "82576b63-a496-439f-a656-f89a0dc00ab9"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:18:59 crc kubenswrapper[4783]: I1002 11:18:59.258013 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/82576b63-a496-439f-a656-f89a0dc00ab9-config-data" (OuterVolumeSpecName: "config-data") pod "82576b63-a496-439f-a656-f89a0dc00ab9" (UID: "82576b63-a496-439f-a656-f89a0dc00ab9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:18:59 crc kubenswrapper[4783]: I1002 11:18:59.266963 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/82576b63-a496-439f-a656-f89a0dc00ab9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "82576b63-a496-439f-a656-f89a0dc00ab9" (UID: "82576b63-a496-439f-a656-f89a0dc00ab9"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:18:59 crc kubenswrapper[4783]: I1002 11:18:59.327231 4783 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/82576b63-a496-439f-a656-f89a0dc00ab9-config-data\") on node \"crc\" DevicePath \"\"" Oct 02 11:18:59 crc kubenswrapper[4783]: I1002 11:18:59.327274 4783 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/82576b63-a496-439f-a656-f89a0dc00ab9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 11:18:59 crc kubenswrapper[4783]: I1002 11:18:59.327287 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kdt5h\" (UniqueName: \"kubernetes.io/projected/82576b63-a496-439f-a656-f89a0dc00ab9-kube-api-access-kdt5h\") on node \"crc\" DevicePath \"\"" Oct 02 11:18:59 crc kubenswrapper[4783]: I1002 11:18:59.327296 4783 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/82576b63-a496-439f-a656-f89a0dc00ab9-scripts\") on node \"crc\" DevicePath \"\"" Oct 02 11:18:59 crc kubenswrapper[4783]: I1002 11:18:59.772974 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-mgxlb" event={"ID":"82576b63-a496-439f-a656-f89a0dc00ab9","Type":"ContainerDied","Data":"531d024be688cb3ebc8b417788c8e8deb543583e435e6b53557bbae21bef6e2a"} Oct 02 11:18:59 crc kubenswrapper[4783]: I1002 11:18:59.773310 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="531d024be688cb3ebc8b417788c8e8deb543583e435e6b53557bbae21bef6e2a" Oct 02 11:18:59 crc kubenswrapper[4783]: I1002 11:18:59.773015 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-mgxlb" Oct 02 11:18:59 crc kubenswrapper[4783]: I1002 11:18:59.950936 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Oct 02 11:18:59 crc kubenswrapper[4783]: I1002 11:18:59.951273 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="b78ec37f-0100-49cd-a812-d7a6566d656f" containerName="nova-api-log" containerID="cri-o://80df88a4dfe442fb63bd389e19aea355128a1f21d3dc524dcc1bbca21efa2ad4" gracePeriod=30 Oct 02 11:18:59 crc kubenswrapper[4783]: I1002 11:18:59.951386 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="b78ec37f-0100-49cd-a812-d7a6566d656f" containerName="nova-api-api" containerID="cri-o://a2d6719a5da2934cc78a8c36ada520d54abcf4bc71b1ec279ccabb2730face6b" gracePeriod=30 Oct 02 11:18:59 crc kubenswrapper[4783]: I1002 11:18:59.984641 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Oct 02 11:18:59 crc kubenswrapper[4783]: I1002 11:18:59.984893 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="e89f85ef-910d-4b8f-8273-a59e2435f4e4" containerName="nova-scheduler-scheduler" containerID="cri-o://a66e98cb977fe3c273191b6ae7df0ef9c8f55b9fe511fb0586957677b80a0884" gracePeriod=30 Oct 02 11:19:00 crc kubenswrapper[4783]: I1002 11:19:00.005192 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Oct 02 11:19:00 crc kubenswrapper[4783]: I1002 11:19:00.005506 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="1cb28c52-08fa-4a54-86d0-445e9c7706a9" 
containerName="nova-metadata-log" containerID="cri-o://4c661406f83ff442a7846871345be3ad3fbcf3383b4e5472164957914e6715bd" gracePeriod=30 Oct 02 11:19:00 crc kubenswrapper[4783]: I1002 11:19:00.005995 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="1cb28c52-08fa-4a54-86d0-445e9c7706a9" containerName="nova-metadata-metadata" containerID="cri-o://4fb99850734680dca4718f69a9896f8317cc418a96c23b87c0fc5054b9aef104" gracePeriod=30 Oct 02 11:19:00 crc kubenswrapper[4783]: E1002 11:19:00.636581 4783 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a66e98cb977fe3c273191b6ae7df0ef9c8f55b9fe511fb0586957677b80a0884" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Oct 02 11:19:00 crc kubenswrapper[4783]: E1002 11:19:00.638863 4783 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a66e98cb977fe3c273191b6ae7df0ef9c8f55b9fe511fb0586957677b80a0884" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Oct 02 11:19:00 crc kubenswrapper[4783]: E1002 11:19:00.640921 4783 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="a66e98cb977fe3c273191b6ae7df0ef9c8f55b9fe511fb0586957677b80a0884" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Oct 02 11:19:00 crc kubenswrapper[4783]: E1002 11:19:00.640995 4783 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="e89f85ef-910d-4b8f-8273-a59e2435f4e4" containerName="nova-scheduler-scheduler" Oct 02 11:19:00 crc kubenswrapper[4783]: I1002 11:19:00.784551 4783 generic.go:334] "Generic (PLEG): container finished" podID="1cb28c52-08fa-4a54-86d0-445e9c7706a9" containerID="4c661406f83ff442a7846871345be3ad3fbcf3383b4e5472164957914e6715bd" exitCode=143 Oct 02 11:19:00 crc kubenswrapper[4783]: I1002 11:19:00.784629 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1cb28c52-08fa-4a54-86d0-445e9c7706a9","Type":"ContainerDied","Data":"4c661406f83ff442a7846871345be3ad3fbcf3383b4e5472164957914e6715bd"} Oct 02 11:19:00 crc kubenswrapper[4783]: I1002 11:19:00.786933 4783 generic.go:334] "Generic (PLEG): container finished" podID="b78ec37f-0100-49cd-a812-d7a6566d656f" containerID="a2d6719a5da2934cc78a8c36ada520d54abcf4bc71b1ec279ccabb2730face6b" exitCode=0 Oct 02 11:19:00 crc kubenswrapper[4783]: I1002 11:19:00.786967 4783 generic.go:334] "Generic (PLEG): container finished" podID="b78ec37f-0100-49cd-a812-d7a6566d656f" containerID="80df88a4dfe442fb63bd389e19aea355128a1f21d3dc524dcc1bbca21efa2ad4" exitCode=143 Oct 02 11:19:00 crc kubenswrapper[4783]: I1002 11:19:00.786948 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b78ec37f-0100-49cd-a812-d7a6566d656f","Type":"ContainerDied","Data":"a2d6719a5da2934cc78a8c36ada520d54abcf4bc71b1ec279ccabb2730face6b"} Oct 02 11:19:00 crc kubenswrapper[4783]: I1002 11:19:00.787015 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-api-0" event={"ID":"b78ec37f-0100-49cd-a812-d7a6566d656f","Type":"ContainerDied","Data":"80df88a4dfe442fb63bd389e19aea355128a1f21d3dc524dcc1bbca21efa2ad4"} Oct 02 11:19:00 crc kubenswrapper[4783]: I1002 11:19:00.787039 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"b78ec37f-0100-49cd-a812-d7a6566d656f","Type":"ContainerDied","Data":"c531be152a3be8ba16dcc44e37c595f13d4d95fa9fcf017e7ba2f5b8113b9f26"} Oct 02 11:19:00 crc kubenswrapper[4783]: I1002 11:19:00.787050 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c531be152a3be8ba16dcc44e37c595f13d4d95fa9fcf017e7ba2f5b8113b9f26" Oct 02 11:19:00 crc kubenswrapper[4783]: I1002 11:19:00.903740 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Oct 02 11:19:01 crc kubenswrapper[4783]: I1002 11:19:01.056762 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b78ec37f-0100-49cd-a812-d7a6566d656f-logs\") pod \"b78ec37f-0100-49cd-a812-d7a6566d656f\" (UID: \"b78ec37f-0100-49cd-a812-d7a6566d656f\") " Oct 02 11:19:01 crc kubenswrapper[4783]: I1002 11:19:01.056839 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b78ec37f-0100-49cd-a812-d7a6566d656f-combined-ca-bundle\") pod \"b78ec37f-0100-49cd-a812-d7a6566d656f\" (UID: \"b78ec37f-0100-49cd-a812-d7a6566d656f\") " Oct 02 11:19:01 crc kubenswrapper[4783]: I1002 11:19:01.056930 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b78ec37f-0100-49cd-a812-d7a6566d656f-internal-tls-certs\") pod \"b78ec37f-0100-49cd-a812-d7a6566d656f\" (UID: \"b78ec37f-0100-49cd-a812-d7a6566d656f\") " Oct 02 11:19:01 crc kubenswrapper[4783]: I1002 11:19:01.056947 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b78ec37f-0100-49cd-a812-d7a6566d656f-config-data\") pod \"b78ec37f-0100-49cd-a812-d7a6566d656f\" (UID: \"b78ec37f-0100-49cd-a812-d7a6566d656f\") " Oct 02 11:19:01 crc kubenswrapper[4783]: I1002 11:19:01.056962 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b78ec37f-0100-49cd-a812-d7a6566d656f-public-tls-certs\") pod \"b78ec37f-0100-49cd-a812-d7a6566d656f\" (UID: \"b78ec37f-0100-49cd-a812-d7a6566d656f\") " Oct 02 11:19:01 crc kubenswrapper[4783]: I1002 11:19:01.057045 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zszdt\" (UniqueName: \"kubernetes.io/projected/b78ec37f-0100-49cd-a812-d7a6566d656f-kube-api-access-zszdt\") pod \"b78ec37f-0100-49cd-a812-d7a6566d656f\" (UID: \"b78ec37f-0100-49cd-a812-d7a6566d656f\") " Oct 02 11:19:01 crc kubenswrapper[4783]: I1002 11:19:01.058458 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b78ec37f-0100-49cd-a812-d7a6566d656f-logs" (OuterVolumeSpecName: "logs") pod "b78ec37f-0100-49cd-a812-d7a6566d656f" (UID: "b78ec37f-0100-49cd-a812-d7a6566d656f"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:19:01 crc kubenswrapper[4783]: I1002 11:19:01.064621 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b78ec37f-0100-49cd-a812-d7a6566d656f-kube-api-access-zszdt" (OuterVolumeSpecName: "kube-api-access-zszdt") pod "b78ec37f-0100-49cd-a812-d7a6566d656f" (UID: "b78ec37f-0100-49cd-a812-d7a6566d656f"). InnerVolumeSpecName "kube-api-access-zszdt". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:19:01 crc kubenswrapper[4783]: I1002 11:19:01.122571 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b78ec37f-0100-49cd-a812-d7a6566d656f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b78ec37f-0100-49cd-a812-d7a6566d656f" (UID: "b78ec37f-0100-49cd-a812-d7a6566d656f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:19:01 crc kubenswrapper[4783]: I1002 11:19:01.138689 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b78ec37f-0100-49cd-a812-d7a6566d656f-config-data" (OuterVolumeSpecName: "config-data") pod "b78ec37f-0100-49cd-a812-d7a6566d656f" (UID: "b78ec37f-0100-49cd-a812-d7a6566d656f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:19:01 crc kubenswrapper[4783]: I1002 11:19:01.160212 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zszdt\" (UniqueName: \"kubernetes.io/projected/b78ec37f-0100-49cd-a812-d7a6566d656f-kube-api-access-zszdt\") on node \"crc\" DevicePath \"\"" Oct 02 11:19:01 crc kubenswrapper[4783]: I1002 11:19:01.160762 4783 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b78ec37f-0100-49cd-a812-d7a6566d656f-logs\") on node \"crc\" DevicePath \"\"" Oct 02 11:19:01 crc kubenswrapper[4783]: I1002 11:19:01.160859 4783 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b78ec37f-0100-49cd-a812-d7a6566d656f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 11:19:01 crc kubenswrapper[4783]: I1002 11:19:01.160916 4783 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b78ec37f-0100-49cd-a812-d7a6566d656f-config-data\") on node \"crc\" DevicePath \"\"" Oct 02 11:19:01 crc kubenswrapper[4783]: I1002 11:19:01.164613 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b78ec37f-0100-49cd-a812-d7a6566d656f-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "b78ec37f-0100-49cd-a812-d7a6566d656f" (UID: "b78ec37f-0100-49cd-a812-d7a6566d656f"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:19:01 crc kubenswrapper[4783]: I1002 11:19:01.201777 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b78ec37f-0100-49cd-a812-d7a6566d656f-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "b78ec37f-0100-49cd-a812-d7a6566d656f" (UID: "b78ec37f-0100-49cd-a812-d7a6566d656f"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:19:01 crc kubenswrapper[4783]: I1002 11:19:01.262560 4783 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b78ec37f-0100-49cd-a812-d7a6566d656f-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 02 11:19:01 crc kubenswrapper[4783]: I1002 11:19:01.262773 4783 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b78ec37f-0100-49cd-a812-d7a6566d656f-public-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 02 11:19:01 crc kubenswrapper[4783]: I1002 11:19:01.796062 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Oct 02 11:19:01 crc kubenswrapper[4783]: I1002 11:19:01.823930 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Oct 02 11:19:01 crc kubenswrapper[4783]: I1002 11:19:01.835504 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Oct 02 11:19:01 crc kubenswrapper[4783]: I1002 11:19:01.855401 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Oct 02 11:19:01 crc kubenswrapper[4783]: E1002 11:19:01.855925 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b78ec37f-0100-49cd-a812-d7a6566d656f" containerName="nova-api-log" Oct 02 11:19:01 crc kubenswrapper[4783]: I1002 11:19:01.855947 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="b78ec37f-0100-49cd-a812-d7a6566d656f" containerName="nova-api-log" Oct 02 11:19:01 crc kubenswrapper[4783]: E1002 11:19:01.855971 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae944d77-12f5-47c5-90b3-916ff3ca9e91" containerName="dnsmasq-dns" Oct 02 11:19:01 crc kubenswrapper[4783]: I1002 11:19:01.855980 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae944d77-12f5-47c5-90b3-916ff3ca9e91" containerName="dnsmasq-dns" Oct 02 11:19:01 crc kubenswrapper[4783]: E1002 11:19:01.855997 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b78ec37f-0100-49cd-a812-d7a6566d656f" containerName="nova-api-api" Oct 02 11:19:01 crc kubenswrapper[4783]: I1002 11:19:01.856007 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="b78ec37f-0100-49cd-a812-d7a6566d656f" containerName="nova-api-api" Oct 02 11:19:01 crc kubenswrapper[4783]: E1002 11:19:01.856023 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82576b63-a496-439f-a656-f89a0dc00ab9" containerName="nova-manage" Oct 02 11:19:01 crc kubenswrapper[4783]: I1002 11:19:01.856031 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="82576b63-a496-439f-a656-f89a0dc00ab9" containerName="nova-manage" Oct 02 11:19:01 crc kubenswrapper[4783]: E1002 11:19:01.856060 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ae944d77-12f5-47c5-90b3-916ff3ca9e91" containerName="init" Oct 02 11:19:01 crc kubenswrapper[4783]: I1002 11:19:01.856069 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="ae944d77-12f5-47c5-90b3-916ff3ca9e91" containerName="init" Oct 02 11:19:01 crc kubenswrapper[4783]: I1002 11:19:01.856282 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="b78ec37f-0100-49cd-a812-d7a6566d656f" containerName="nova-api-log" Oct 02 11:19:01 crc kubenswrapper[4783]: I1002 11:19:01.856313 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="ae944d77-12f5-47c5-90b3-916ff3ca9e91" containerName="dnsmasq-dns" Oct 02 11:19:01 
crc kubenswrapper[4783]: I1002 11:19:01.856325 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="82576b63-a496-439f-a656-f89a0dc00ab9" containerName="nova-manage" Oct 02 11:19:01 crc kubenswrapper[4783]: I1002 11:19:01.856347 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="b78ec37f-0100-49cd-a812-d7a6566d656f" containerName="nova-api-api" Oct 02 11:19:01 crc kubenswrapper[4783]: I1002 11:19:01.857641 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Oct 02 11:19:01 crc kubenswrapper[4783]: I1002 11:19:01.860294 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Oct 02 11:19:01 crc kubenswrapper[4783]: I1002 11:19:01.862317 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Oct 02 11:19:01 crc kubenswrapper[4783]: I1002 11:19:01.862857 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Oct 02 11:19:01 crc kubenswrapper[4783]: I1002 11:19:01.875437 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Oct 02 11:19:01 crc kubenswrapper[4783]: I1002 11:19:01.976946 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8bf2ea3-c311-4aa8-becd-9b6b38974a65-config-data\") pod \"nova-api-0\" (UID: \"e8bf2ea3-c311-4aa8-becd-9b6b38974a65\") " pod="openstack/nova-api-0" Oct 02 11:19:01 crc kubenswrapper[4783]: I1002 11:19:01.977014 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rvvmc\" (UniqueName: \"kubernetes.io/projected/e8bf2ea3-c311-4aa8-becd-9b6b38974a65-kube-api-access-rvvmc\") pod \"nova-api-0\" (UID: \"e8bf2ea3-c311-4aa8-becd-9b6b38974a65\") " pod="openstack/nova-api-0" Oct 02 11:19:01 crc kubenswrapper[4783]: I1002 11:19:01.977050 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e8bf2ea3-c311-4aa8-becd-9b6b38974a65-internal-tls-certs\") pod \"nova-api-0\" (UID: \"e8bf2ea3-c311-4aa8-becd-9b6b38974a65\") " pod="openstack/nova-api-0" Oct 02 11:19:01 crc kubenswrapper[4783]: I1002 11:19:01.977251 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8bf2ea3-c311-4aa8-becd-9b6b38974a65-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e8bf2ea3-c311-4aa8-becd-9b6b38974a65\") " pod="openstack/nova-api-0" Oct 02 11:19:01 crc kubenswrapper[4783]: I1002 11:19:01.977436 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e8bf2ea3-c311-4aa8-becd-9b6b38974a65-public-tls-certs\") pod \"nova-api-0\" (UID: \"e8bf2ea3-c311-4aa8-becd-9b6b38974a65\") " pod="openstack/nova-api-0" Oct 02 11:19:01 crc kubenswrapper[4783]: I1002 11:19:01.977678 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e8bf2ea3-c311-4aa8-becd-9b6b38974a65-logs\") pod \"nova-api-0\" (UID: \"e8bf2ea3-c311-4aa8-becd-9b6b38974a65\") " pod="openstack/nova-api-0" Oct 02 11:19:02 crc kubenswrapper[4783]: I1002 11:19:02.079448 4783 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e8bf2ea3-c311-4aa8-becd-9b6b38974a65-public-tls-certs\") pod \"nova-api-0\" (UID: \"e8bf2ea3-c311-4aa8-becd-9b6b38974a65\") " pod="openstack/nova-api-0" Oct 02 11:19:02 crc kubenswrapper[4783]: I1002 11:19:02.079591 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e8bf2ea3-c311-4aa8-becd-9b6b38974a65-logs\") pod \"nova-api-0\" (UID: \"e8bf2ea3-c311-4aa8-becd-9b6b38974a65\") " pod="openstack/nova-api-0" Oct 02 11:19:02 crc kubenswrapper[4783]: I1002 11:19:02.079711 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e8bf2ea3-c311-4aa8-becd-9b6b38974a65-config-data\") pod \"nova-api-0\" (UID: \"e8bf2ea3-c311-4aa8-becd-9b6b38974a65\") " pod="openstack/nova-api-0" Oct 02 11:19:02 crc kubenswrapper[4783]: I1002 11:19:02.079749 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rvvmc\" (UniqueName: \"kubernetes.io/projected/e8bf2ea3-c311-4aa8-becd-9b6b38974a65-kube-api-access-rvvmc\") pod \"nova-api-0\" (UID: \"e8bf2ea3-c311-4aa8-becd-9b6b38974a65\") " pod="openstack/nova-api-0" Oct 02 11:19:02 crc kubenswrapper[4783]: I1002 11:19:02.079794 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e8bf2ea3-c311-4aa8-becd-9b6b38974a65-internal-tls-certs\") pod \"nova-api-0\" (UID: \"e8bf2ea3-c311-4aa8-becd-9b6b38974a65\") " pod="openstack/nova-api-0" Oct 02 11:19:02 crc kubenswrapper[4783]: I1002 11:19:02.079860 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8bf2ea3-c311-4aa8-becd-9b6b38974a65-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e8bf2ea3-c311-4aa8-becd-9b6b38974a65\") " pod="openstack/nova-api-0" Oct 02 11:19:02 crc kubenswrapper[4783]: I1002 11:19:02.081398 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/e8bf2ea3-c311-4aa8-becd-9b6b38974a65-logs\") pod \"nova-api-0\" (UID: \"e8bf2ea3-c311-4aa8-becd-9b6b38974a65\") " pod="openstack/nova-api-0" Oct 02 11:19:02 crc kubenswrapper[4783]: I1002 11:19:02.086338 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/e8bf2ea3-c311-4aa8-becd-9b6b38974a65-public-tls-certs\") pod \"nova-api-0\" (UID: \"e8bf2ea3-c311-4aa8-becd-9b6b38974a65\") " pod="openstack/nova-api-0" Oct 02 11:19:02 crc kubenswrapper[4783]: I1002 11:19:02.087174 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e8bf2ea3-c311-4aa8-becd-9b6b38974a65-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"e8bf2ea3-c311-4aa8-becd-9b6b38974a65\") " pod="openstack/nova-api-0" Oct 02 11:19:02 crc kubenswrapper[4783]: I1002 11:19:02.090734 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/e8bf2ea3-c311-4aa8-becd-9b6b38974a65-internal-tls-certs\") pod \"nova-api-0\" (UID: \"e8bf2ea3-c311-4aa8-becd-9b6b38974a65\") " pod="openstack/nova-api-0" Oct 02 11:19:02 crc kubenswrapper[4783]: I1002 11:19:02.099800 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/e8bf2ea3-c311-4aa8-becd-9b6b38974a65-config-data\") pod \"nova-api-0\" (UID: \"e8bf2ea3-c311-4aa8-becd-9b6b38974a65\") " pod="openstack/nova-api-0" Oct 02 11:19:02 crc kubenswrapper[4783]: I1002 11:19:02.109983 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rvvmc\" (UniqueName: \"kubernetes.io/projected/e8bf2ea3-c311-4aa8-becd-9b6b38974a65-kube-api-access-rvvmc\") pod \"nova-api-0\" (UID: \"e8bf2ea3-c311-4aa8-becd-9b6b38974a65\") " pod="openstack/nova-api-0" Oct 02 11:19:02 crc kubenswrapper[4783]: I1002 11:19:02.179702 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Oct 02 11:19:02 crc kubenswrapper[4783]: I1002 11:19:02.647746 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Oct 02 11:19:02 crc kubenswrapper[4783]: W1002 11:19:02.649715 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode8bf2ea3_c311_4aa8_becd_9b6b38974a65.slice/crio-0e3f70575cb1610e4a93f0f7f787b6cee695375943cb01088142957a67d408fb WatchSource:0}: Error finding container 0e3f70575cb1610e4a93f0f7f787b6cee695375943cb01088142957a67d408fb: Status 404 returned error can't find the container with id 0e3f70575cb1610e4a93f0f7f787b6cee695375943cb01088142957a67d408fb Oct 02 11:19:02 crc kubenswrapper[4783]: I1002 11:19:02.808305 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e8bf2ea3-c311-4aa8-becd-9b6b38974a65","Type":"ContainerStarted","Data":"0e3f70575cb1610e4a93f0f7f787b6cee695375943cb01088142957a67d408fb"} Oct 02 11:19:03 crc kubenswrapper[4783]: I1002 11:19:03.168527 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="1cb28c52-08fa-4a54-86d0-445e9c7706a9" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.197:8775/\": read tcp 10.217.0.2:53190->10.217.0.197:8775: read: connection reset by peer" Oct 02 11:19:03 crc kubenswrapper[4783]: I1002 11:19:03.168613 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="1cb28c52-08fa-4a54-86d0-445e9c7706a9" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.197:8775/\": read tcp 10.217.0.2:53196->10.217.0.197:8775: read: connection reset by peer" Oct 02 11:19:03 crc kubenswrapper[4783]: I1002 11:19:03.576306 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b78ec37f-0100-49cd-a812-d7a6566d656f" path="/var/lib/kubelet/pods/b78ec37f-0100-49cd-a812-d7a6566d656f/volumes" Oct 02 11:19:03 crc kubenswrapper[4783]: I1002 11:19:03.589194 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Oct 02 11:19:03 crc kubenswrapper[4783]: I1002 11:19:03.722161 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/1cb28c52-08fa-4a54-86d0-445e9c7706a9-nova-metadata-tls-certs\") pod \"1cb28c52-08fa-4a54-86d0-445e9c7706a9\" (UID: \"1cb28c52-08fa-4a54-86d0-445e9c7706a9\") " Oct 02 11:19:03 crc kubenswrapper[4783]: I1002 11:19:03.722306 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1cb28c52-08fa-4a54-86d0-445e9c7706a9-logs\") pod \"1cb28c52-08fa-4a54-86d0-445e9c7706a9\" (UID: \"1cb28c52-08fa-4a54-86d0-445e9c7706a9\") " Oct 02 11:19:03 crc kubenswrapper[4783]: I1002 11:19:03.722384 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1cb28c52-08fa-4a54-86d0-445e9c7706a9-config-data\") pod \"1cb28c52-08fa-4a54-86d0-445e9c7706a9\" (UID: \"1cb28c52-08fa-4a54-86d0-445e9c7706a9\") " Oct 02 11:19:03 crc kubenswrapper[4783]: I1002 11:19:03.722444 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1cb28c52-08fa-4a54-86d0-445e9c7706a9-combined-ca-bundle\") pod \"1cb28c52-08fa-4a54-86d0-445e9c7706a9\" (UID: \"1cb28c52-08fa-4a54-86d0-445e9c7706a9\") " Oct 02 11:19:03 crc kubenswrapper[4783]: I1002 11:19:03.722635 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v8j6f\" (UniqueName: \"kubernetes.io/projected/1cb28c52-08fa-4a54-86d0-445e9c7706a9-kube-api-access-v8j6f\") pod \"1cb28c52-08fa-4a54-86d0-445e9c7706a9\" (UID: \"1cb28c52-08fa-4a54-86d0-445e9c7706a9\") " Oct 02 11:19:03 crc kubenswrapper[4783]: I1002 11:19:03.723158 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1cb28c52-08fa-4a54-86d0-445e9c7706a9-logs" (OuterVolumeSpecName: "logs") pod "1cb28c52-08fa-4a54-86d0-445e9c7706a9" (UID: "1cb28c52-08fa-4a54-86d0-445e9c7706a9"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:19:03 crc kubenswrapper[4783]: I1002 11:19:03.728748 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1cb28c52-08fa-4a54-86d0-445e9c7706a9-kube-api-access-v8j6f" (OuterVolumeSpecName: "kube-api-access-v8j6f") pod "1cb28c52-08fa-4a54-86d0-445e9c7706a9" (UID: "1cb28c52-08fa-4a54-86d0-445e9c7706a9"). InnerVolumeSpecName "kube-api-access-v8j6f". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:19:03 crc kubenswrapper[4783]: I1002 11:19:03.771317 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1cb28c52-08fa-4a54-86d0-445e9c7706a9-config-data" (OuterVolumeSpecName: "config-data") pod "1cb28c52-08fa-4a54-86d0-445e9c7706a9" (UID: "1cb28c52-08fa-4a54-86d0-445e9c7706a9"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:19:03 crc kubenswrapper[4783]: I1002 11:19:03.782480 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1cb28c52-08fa-4a54-86d0-445e9c7706a9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1cb28c52-08fa-4a54-86d0-445e9c7706a9" (UID: "1cb28c52-08fa-4a54-86d0-445e9c7706a9"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:19:03 crc kubenswrapper[4783]: I1002 11:19:03.815359 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1cb28c52-08fa-4a54-86d0-445e9c7706a9-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "1cb28c52-08fa-4a54-86d0-445e9c7706a9" (UID: "1cb28c52-08fa-4a54-86d0-445e9c7706a9"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:19:03 crc kubenswrapper[4783]: I1002 11:19:03.823317 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e8bf2ea3-c311-4aa8-becd-9b6b38974a65","Type":"ContainerStarted","Data":"89b0368ed24a40f0bd49e47830905c1a3813859261e6dba239d09113df5741a6"} Oct 02 11:19:03 crc kubenswrapper[4783]: I1002 11:19:03.823868 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"e8bf2ea3-c311-4aa8-becd-9b6b38974a65","Type":"ContainerStarted","Data":"0e2542c3814aa453b38e64c10c53e167933359023be71f82dd11de48de8d086a"} Oct 02 11:19:03 crc kubenswrapper[4783]: I1002 11:19:03.825302 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v8j6f\" (UniqueName: \"kubernetes.io/projected/1cb28c52-08fa-4a54-86d0-445e9c7706a9-kube-api-access-v8j6f\") on node \"crc\" DevicePath \"\"" Oct 02 11:19:03 crc kubenswrapper[4783]: I1002 11:19:03.825345 4783 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/1cb28c52-08fa-4a54-86d0-445e9c7706a9-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Oct 02 11:19:03 crc kubenswrapper[4783]: I1002 11:19:03.825358 4783 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/1cb28c52-08fa-4a54-86d0-445e9c7706a9-logs\") on node \"crc\" DevicePath \"\"" Oct 02 11:19:03 crc kubenswrapper[4783]: I1002 11:19:03.825371 4783 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1cb28c52-08fa-4a54-86d0-445e9c7706a9-config-data\") on node \"crc\" DevicePath \"\"" Oct 02 11:19:03 crc kubenswrapper[4783]: I1002 11:19:03.825384 4783 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1cb28c52-08fa-4a54-86d0-445e9c7706a9-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 11:19:03 crc kubenswrapper[4783]: I1002 11:19:03.826700 4783 generic.go:334] "Generic (PLEG): container finished" podID="1cb28c52-08fa-4a54-86d0-445e9c7706a9" containerID="4fb99850734680dca4718f69a9896f8317cc418a96c23b87c0fc5054b9aef104" exitCode=0 Oct 02 11:19:03 crc kubenswrapper[4783]: I1002 11:19:03.826731 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1cb28c52-08fa-4a54-86d0-445e9c7706a9","Type":"ContainerDied","Data":"4fb99850734680dca4718f69a9896f8317cc418a96c23b87c0fc5054b9aef104"} Oct 02 11:19:03 crc kubenswrapper[4783]: I1002 11:19:03.826755 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"1cb28c52-08fa-4a54-86d0-445e9c7706a9","Type":"ContainerDied","Data":"7d7e126b21400e498017914ee79959c02ec568e58a4d0e6eefde889508a702ef"} Oct 02 11:19:03 crc kubenswrapper[4783]: I1002 11:19:03.826762 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Oct 02 11:19:03 crc kubenswrapper[4783]: I1002 11:19:03.826772 4783 scope.go:117] "RemoveContainer" containerID="4fb99850734680dca4718f69a9896f8317cc418a96c23b87c0fc5054b9aef104" Oct 02 11:19:03 crc kubenswrapper[4783]: I1002 11:19:03.847653 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.847632975 podStartE2EDuration="2.847632975s" podCreationTimestamp="2025-10-02 11:19:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:19:03.839619776 +0000 UTC m=+1577.155814037" watchObservedRunningTime="2025-10-02 11:19:03.847632975 +0000 UTC m=+1577.163827246" Oct 02 11:19:03 crc kubenswrapper[4783]: I1002 11:19:03.857598 4783 scope.go:117] "RemoveContainer" containerID="4c661406f83ff442a7846871345be3ad3fbcf3383b4e5472164957914e6715bd" Oct 02 11:19:03 crc kubenswrapper[4783]: I1002 11:19:03.872838 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Oct 02 11:19:03 crc kubenswrapper[4783]: I1002 11:19:03.882110 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Oct 02 11:19:03 crc kubenswrapper[4783]: I1002 11:19:03.884471 4783 scope.go:117] "RemoveContainer" containerID="4fb99850734680dca4718f69a9896f8317cc418a96c23b87c0fc5054b9aef104" Oct 02 11:19:03 crc kubenswrapper[4783]: E1002 11:19:03.886189 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4fb99850734680dca4718f69a9896f8317cc418a96c23b87c0fc5054b9aef104\": container with ID starting with 4fb99850734680dca4718f69a9896f8317cc418a96c23b87c0fc5054b9aef104 not found: ID does not exist" containerID="4fb99850734680dca4718f69a9896f8317cc418a96c23b87c0fc5054b9aef104" Oct 02 11:19:03 crc kubenswrapper[4783]: I1002 11:19:03.886305 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4fb99850734680dca4718f69a9896f8317cc418a96c23b87c0fc5054b9aef104"} err="failed to get container status \"4fb99850734680dca4718f69a9896f8317cc418a96c23b87c0fc5054b9aef104\": rpc error: code = NotFound desc = could not find container \"4fb99850734680dca4718f69a9896f8317cc418a96c23b87c0fc5054b9aef104\": container with ID starting with 4fb99850734680dca4718f69a9896f8317cc418a96c23b87c0fc5054b9aef104 not found: ID does not exist" Oct 02 11:19:03 crc kubenswrapper[4783]: I1002 11:19:03.886403 4783 scope.go:117] "RemoveContainer" containerID="4c661406f83ff442a7846871345be3ad3fbcf3383b4e5472164957914e6715bd" Oct 02 11:19:03 crc kubenswrapper[4783]: E1002 11:19:03.887672 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4c661406f83ff442a7846871345be3ad3fbcf3383b4e5472164957914e6715bd\": container with ID starting with 4c661406f83ff442a7846871345be3ad3fbcf3383b4e5472164957914e6715bd not found: ID does not exist" containerID="4c661406f83ff442a7846871345be3ad3fbcf3383b4e5472164957914e6715bd" Oct 02 11:19:03 crc kubenswrapper[4783]: I1002 11:19:03.887781 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4c661406f83ff442a7846871345be3ad3fbcf3383b4e5472164957914e6715bd"} err="failed to get container status \"4c661406f83ff442a7846871345be3ad3fbcf3383b4e5472164957914e6715bd\": rpc error: code = NotFound desc = could not find container 
\"4c661406f83ff442a7846871345be3ad3fbcf3383b4e5472164957914e6715bd\": container with ID starting with 4c661406f83ff442a7846871345be3ad3fbcf3383b4e5472164957914e6715bd not found: ID does not exist" Oct 02 11:19:03 crc kubenswrapper[4783]: I1002 11:19:03.891460 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Oct 02 11:19:03 crc kubenswrapper[4783]: E1002 11:19:03.891847 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1cb28c52-08fa-4a54-86d0-445e9c7706a9" containerName="nova-metadata-metadata" Oct 02 11:19:03 crc kubenswrapper[4783]: I1002 11:19:03.891869 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="1cb28c52-08fa-4a54-86d0-445e9c7706a9" containerName="nova-metadata-metadata" Oct 02 11:19:03 crc kubenswrapper[4783]: E1002 11:19:03.891895 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1cb28c52-08fa-4a54-86d0-445e9c7706a9" containerName="nova-metadata-log" Oct 02 11:19:03 crc kubenswrapper[4783]: I1002 11:19:03.891929 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="1cb28c52-08fa-4a54-86d0-445e9c7706a9" containerName="nova-metadata-log" Oct 02 11:19:03 crc kubenswrapper[4783]: I1002 11:19:03.892132 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="1cb28c52-08fa-4a54-86d0-445e9c7706a9" containerName="nova-metadata-metadata" Oct 02 11:19:03 crc kubenswrapper[4783]: I1002 11:19:03.892158 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="1cb28c52-08fa-4a54-86d0-445e9c7706a9" containerName="nova-metadata-log" Oct 02 11:19:03 crc kubenswrapper[4783]: I1002 11:19:03.893173 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Oct 02 11:19:03 crc kubenswrapper[4783]: I1002 11:19:03.897943 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Oct 02 11:19:03 crc kubenswrapper[4783]: I1002 11:19:03.903057 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Oct 02 11:19:03 crc kubenswrapper[4783]: I1002 11:19:03.907612 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Oct 02 11:19:04 crc kubenswrapper[4783]: I1002 11:19:04.028778 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ec427546-ed98-4091-93e6-563f539a9d69-logs\") pod \"nova-metadata-0\" (UID: \"ec427546-ed98-4091-93e6-563f539a9d69\") " pod="openstack/nova-metadata-0" Oct 02 11:19:04 crc kubenswrapper[4783]: I1002 11:19:04.028870 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/ec427546-ed98-4091-93e6-563f539a9d69-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"ec427546-ed98-4091-93e6-563f539a9d69\") " pod="openstack/nova-metadata-0" Oct 02 11:19:04 crc kubenswrapper[4783]: I1002 11:19:04.028922 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ec427546-ed98-4091-93e6-563f539a9d69-config-data\") pod \"nova-metadata-0\" (UID: \"ec427546-ed98-4091-93e6-563f539a9d69\") " pod="openstack/nova-metadata-0" Oct 02 11:19:04 crc kubenswrapper[4783]: I1002 11:19:04.028981 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-77gcc\" (UniqueName: \"kubernetes.io/projected/ec427546-ed98-4091-93e6-563f539a9d69-kube-api-access-77gcc\") pod \"nova-metadata-0\" (UID: \"ec427546-ed98-4091-93e6-563f539a9d69\") " pod="openstack/nova-metadata-0" Oct 02 11:19:04 crc kubenswrapper[4783]: I1002 11:19:04.029076 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec427546-ed98-4091-93e6-563f539a9d69-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"ec427546-ed98-4091-93e6-563f539a9d69\") " pod="openstack/nova-metadata-0" Oct 02 11:19:04 crc kubenswrapper[4783]: I1002 11:19:04.131073 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-77gcc\" (UniqueName: \"kubernetes.io/projected/ec427546-ed98-4091-93e6-563f539a9d69-kube-api-access-77gcc\") pod \"nova-metadata-0\" (UID: \"ec427546-ed98-4091-93e6-563f539a9d69\") " pod="openstack/nova-metadata-0" Oct 02 11:19:04 crc kubenswrapper[4783]: I1002 11:19:04.131162 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec427546-ed98-4091-93e6-563f539a9d69-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"ec427546-ed98-4091-93e6-563f539a9d69\") " pod="openstack/nova-metadata-0" Oct 02 11:19:04 crc kubenswrapper[4783]: I1002 11:19:04.131201 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ec427546-ed98-4091-93e6-563f539a9d69-logs\") pod \"nova-metadata-0\" (UID: \"ec427546-ed98-4091-93e6-563f539a9d69\") " pod="openstack/nova-metadata-0" Oct 02 11:19:04 crc kubenswrapper[4783]: I1002 11:19:04.131243 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/ec427546-ed98-4091-93e6-563f539a9d69-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"ec427546-ed98-4091-93e6-563f539a9d69\") " pod="openstack/nova-metadata-0" Oct 02 11:19:04 crc kubenswrapper[4783]: I1002 11:19:04.131285 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ec427546-ed98-4091-93e6-563f539a9d69-config-data\") pod \"nova-metadata-0\" (UID: \"ec427546-ed98-4091-93e6-563f539a9d69\") " pod="openstack/nova-metadata-0" Oct 02 11:19:04 crc kubenswrapper[4783]: I1002 11:19:04.132031 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/ec427546-ed98-4091-93e6-563f539a9d69-logs\") pod \"nova-metadata-0\" (UID: \"ec427546-ed98-4091-93e6-563f539a9d69\") " pod="openstack/nova-metadata-0" Oct 02 11:19:04 crc kubenswrapper[4783]: I1002 11:19:04.136127 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/ec427546-ed98-4091-93e6-563f539a9d69-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"ec427546-ed98-4091-93e6-563f539a9d69\") " pod="openstack/nova-metadata-0" Oct 02 11:19:04 crc kubenswrapper[4783]: I1002 11:19:04.136132 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ec427546-ed98-4091-93e6-563f539a9d69-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"ec427546-ed98-4091-93e6-563f539a9d69\") " pod="openstack/nova-metadata-0" Oct 02 11:19:04 crc kubenswrapper[4783]: I1002 
Oct 02 11:19:04 crc kubenswrapper[4783]: I1002 11:19:04.136363 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ec427546-ed98-4091-93e6-563f539a9d69-config-data\") pod \"nova-metadata-0\" (UID: \"ec427546-ed98-4091-93e6-563f539a9d69\") " pod="openstack/nova-metadata-0"
Oct 02 11:19:04 crc kubenswrapper[4783]: I1002 11:19:04.147478 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-77gcc\" (UniqueName: \"kubernetes.io/projected/ec427546-ed98-4091-93e6-563f539a9d69-kube-api-access-77gcc\") pod \"nova-metadata-0\" (UID: \"ec427546-ed98-4091-93e6-563f539a9d69\") " pod="openstack/nova-metadata-0"
Oct 02 11:19:04 crc kubenswrapper[4783]: I1002 11:19:04.225315 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Oct 02 11:19:04 crc kubenswrapper[4783]: I1002 11:19:04.544695 4783 scope.go:117] "RemoveContainer" containerID="a4f4b50638a10c4b177b4afe187ab7d697078645bdbd553938b22e713769fb04"
Oct 02 11:19:04 crc kubenswrapper[4783]: E1002 11:19:04.545216 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 11:19:04 crc kubenswrapper[4783]: I1002 11:19:04.689767 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Oct 02 11:19:04 crc kubenswrapper[4783]: W1002 11:19:04.690696 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podec427546_ed98_4091_93e6_563f539a9d69.slice/crio-e54d11d2d01c3f94f42d28d0e5431fcb05714c42ac0339e97d4fa78f0b40da7e WatchSource:0}: Error finding container e54d11d2d01c3f94f42d28d0e5431fcb05714c42ac0339e97d4fa78f0b40da7e: Status 404 returned error can't find the container with id e54d11d2d01c3f94f42d28d0e5431fcb05714c42ac0339e97d4fa78f0b40da7e
Oct 02 11:19:04 crc kubenswrapper[4783]: I1002 11:19:04.842673 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ec427546-ed98-4091-93e6-563f539a9d69","Type":"ContainerStarted","Data":"e54d11d2d01c3f94f42d28d0e5431fcb05714c42ac0339e97d4fa78f0b40da7e"}
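
The machine-config-daemon entry above is a restart throttled by the kubelet's crash-loop back-off, which has already reached its ceiling, hence the constant "back-off 5m0s" here and again at 11:19:16, 11:19:29, and 11:19:40. By the kubelet's default policy the wait starts at 10s and doubles per restart up to a 5m cap; a small Go illustration of that schedule (the constants are the documented defaults, the loop is not kubelet source):

package main

import (
	"fmt"
	"time"
)

func main() {
	// Kubelet defaults: initial 10s delay, doubling, capped at 5m.
	backoff, maxBackoff := 10*time.Second, 5*time.Minute
	for restart := 1; restart <= 8; restart++ {
		fmt.Printf("restart %d: wait %v\n", restart, backoff)
		backoff *= 2
		if backoff > maxBackoff {
			backoff = maxBackoff // from here on every retry logs "back-off 5m0s"
		}
	}
}
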
Oct 02 11:19:05 crc kubenswrapper[4783]: I1002 11:19:05.407495 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Oct 02 11:19:05 crc kubenswrapper[4783]: I1002 11:19:05.556766 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1cb28c52-08fa-4a54-86d0-445e9c7706a9" path="/var/lib/kubelet/pods/1cb28c52-08fa-4a54-86d0-445e9c7706a9/volumes"
Oct 02 11:19:05 crc kubenswrapper[4783]: I1002 11:19:05.562387 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e89f85ef-910d-4b8f-8273-a59e2435f4e4-config-data\") pod \"e89f85ef-910d-4b8f-8273-a59e2435f4e4\" (UID: \"e89f85ef-910d-4b8f-8273-a59e2435f4e4\") "
Oct 02 11:19:05 crc kubenswrapper[4783]: I1002 11:19:05.562566 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x44zd\" (UniqueName: \"kubernetes.io/projected/e89f85ef-910d-4b8f-8273-a59e2435f4e4-kube-api-access-x44zd\") pod \"e89f85ef-910d-4b8f-8273-a59e2435f4e4\" (UID: \"e89f85ef-910d-4b8f-8273-a59e2435f4e4\") "
Oct 02 11:19:05 crc kubenswrapper[4783]: I1002 11:19:05.562644 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e89f85ef-910d-4b8f-8273-a59e2435f4e4-combined-ca-bundle\") pod \"e89f85ef-910d-4b8f-8273-a59e2435f4e4\" (UID: \"e89f85ef-910d-4b8f-8273-a59e2435f4e4\") "
Oct 02 11:19:05 crc kubenswrapper[4783]: I1002 11:19:05.566518 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e89f85ef-910d-4b8f-8273-a59e2435f4e4-kube-api-access-x44zd" (OuterVolumeSpecName: "kube-api-access-x44zd") pod "e89f85ef-910d-4b8f-8273-a59e2435f4e4" (UID: "e89f85ef-910d-4b8f-8273-a59e2435f4e4"). InnerVolumeSpecName "kube-api-access-x44zd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 02 11:19:05 crc kubenswrapper[4783]: I1002 11:19:05.590467 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e89f85ef-910d-4b8f-8273-a59e2435f4e4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e89f85ef-910d-4b8f-8273-a59e2435f4e4" (UID: "e89f85ef-910d-4b8f-8273-a59e2435f4e4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:19:05 crc kubenswrapper[4783]: I1002 11:19:05.593666 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e89f85ef-910d-4b8f-8273-a59e2435f4e4-config-data" (OuterVolumeSpecName: "config-data") pod "e89f85ef-910d-4b8f-8273-a59e2435f4e4" (UID: "e89f85ef-910d-4b8f-8273-a59e2435f4e4"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
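
"Cleaned up orphaned pod volumes dir" fires only once every volume under /var/lib/kubelet/pods/<podUID>/volumes has been torn down for a deleted pod. A hedged sketch of the emptiness check involved, assuming the standard kubelet pods directory layout (a model, not the kubelet_volumes.go implementation):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// podVolumesEmpty reports whether a terminated pod's volumes dir
// holds no remaining volume mounts and can therefore be removed.
func podVolumesEmpty(podUID string) (bool, error) {
	dir := filepath.Join("/var/lib/kubelet/pods", podUID, "volumes")
	plugins, err := os.ReadDir(dir)
	if os.IsNotExist(err) {
		return true, nil // already cleaned up
	}
	if err != nil {
		return false, err
	}
	for _, plugin := range plugins { // e.g. kubernetes.io~secret, kubernetes.io~projected
		vols, err := os.ReadDir(filepath.Join(dir, plugin.Name()))
		if err != nil {
			return false, err
		}
		if len(vols) > 0 {
			return false, nil // a volume is still mounted
		}
	}
	return true, nil
}

func main() {
	ok, err := podVolumesEmpty("1cb28c52-08fa-4a54-86d0-445e9c7706a9")
	fmt.Println(ok, err)
}
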
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:19:05 crc kubenswrapper[4783]: I1002 11:19:05.664574 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x44zd\" (UniqueName: \"kubernetes.io/projected/e89f85ef-910d-4b8f-8273-a59e2435f4e4-kube-api-access-x44zd\") on node \"crc\" DevicePath \"\"" Oct 02 11:19:05 crc kubenswrapper[4783]: I1002 11:19:05.664613 4783 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e89f85ef-910d-4b8f-8273-a59e2435f4e4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 11:19:05 crc kubenswrapper[4783]: I1002 11:19:05.664622 4783 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e89f85ef-910d-4b8f-8273-a59e2435f4e4-config-data\") on node \"crc\" DevicePath \"\"" Oct 02 11:19:05 crc kubenswrapper[4783]: I1002 11:19:05.852144 4783 generic.go:334] "Generic (PLEG): container finished" podID="e89f85ef-910d-4b8f-8273-a59e2435f4e4" containerID="a66e98cb977fe3c273191b6ae7df0ef9c8f55b9fe511fb0586957677b80a0884" exitCode=0 Oct 02 11:19:05 crc kubenswrapper[4783]: I1002 11:19:05.852217 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Oct 02 11:19:05 crc kubenswrapper[4783]: I1002 11:19:05.852231 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"e89f85ef-910d-4b8f-8273-a59e2435f4e4","Type":"ContainerDied","Data":"a66e98cb977fe3c273191b6ae7df0ef9c8f55b9fe511fb0586957677b80a0884"} Oct 02 11:19:05 crc kubenswrapper[4783]: I1002 11:19:05.852360 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"e89f85ef-910d-4b8f-8273-a59e2435f4e4","Type":"ContainerDied","Data":"c3436badf36167939caf2fe4db13b45f47c46d7372aebf14c4dc5fdf942abeb0"} Oct 02 11:19:05 crc kubenswrapper[4783]: I1002 11:19:05.852391 4783 scope.go:117] "RemoveContainer" containerID="a66e98cb977fe3c273191b6ae7df0ef9c8f55b9fe511fb0586957677b80a0884" Oct 02 11:19:05 crc kubenswrapper[4783]: I1002 11:19:05.855792 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ec427546-ed98-4091-93e6-563f539a9d69","Type":"ContainerStarted","Data":"ea83ac53174f926cd6c2b91d1d2d0c12014814308c8d64b06d13d3b79a0b6e7c"} Oct 02 11:19:05 crc kubenswrapper[4783]: I1002 11:19:05.855916 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"ec427546-ed98-4091-93e6-563f539a9d69","Type":"ContainerStarted","Data":"2a08c6fed04296c56981e7a01f8448dab379e27be974b59f8eb7183c4c523b30"} Oct 02 11:19:05 crc kubenswrapper[4783]: I1002 11:19:05.885135 4783 scope.go:117] "RemoveContainer" containerID="a66e98cb977fe3c273191b6ae7df0ef9c8f55b9fe511fb0586957677b80a0884" Oct 02 11:19:05 crc kubenswrapper[4783]: E1002 11:19:05.885755 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a66e98cb977fe3c273191b6ae7df0ef9c8f55b9fe511fb0586957677b80a0884\": container with ID starting with a66e98cb977fe3c273191b6ae7df0ef9c8f55b9fe511fb0586957677b80a0884 not found: ID does not exist" containerID="a66e98cb977fe3c273191b6ae7df0ef9c8f55b9fe511fb0586957677b80a0884" Oct 02 11:19:05 crc kubenswrapper[4783]: I1002 11:19:05.885781 4783 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"a66e98cb977fe3c273191b6ae7df0ef9c8f55b9fe511fb0586957677b80a0884"} err="failed to get container status \"a66e98cb977fe3c273191b6ae7df0ef9c8f55b9fe511fb0586957677b80a0884\": rpc error: code = NotFound desc = could not find container \"a66e98cb977fe3c273191b6ae7df0ef9c8f55b9fe511fb0586957677b80a0884\": container with ID starting with a66e98cb977fe3c273191b6ae7df0ef9c8f55b9fe511fb0586957677b80a0884 not found: ID does not exist" Oct 02 11:19:05 crc kubenswrapper[4783]: I1002 11:19:05.888737 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.88871105 podStartE2EDuration="2.88871105s" podCreationTimestamp="2025-10-02 11:19:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:19:05.880490855 +0000 UTC m=+1579.196685116" watchObservedRunningTime="2025-10-02 11:19:05.88871105 +0000 UTC m=+1579.204905311" Oct 02 11:19:05 crc kubenswrapper[4783]: I1002 11:19:05.911382 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Oct 02 11:19:05 crc kubenswrapper[4783]: I1002 11:19:05.930235 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Oct 02 11:19:05 crc kubenswrapper[4783]: I1002 11:19:05.934227 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Oct 02 11:19:05 crc kubenswrapper[4783]: E1002 11:19:05.934723 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e89f85ef-910d-4b8f-8273-a59e2435f4e4" containerName="nova-scheduler-scheduler" Oct 02 11:19:05 crc kubenswrapper[4783]: I1002 11:19:05.934795 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="e89f85ef-910d-4b8f-8273-a59e2435f4e4" containerName="nova-scheduler-scheduler" Oct 02 11:19:05 crc kubenswrapper[4783]: I1002 11:19:05.935077 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="e89f85ef-910d-4b8f-8273-a59e2435f4e4" containerName="nova-scheduler-scheduler" Oct 02 11:19:05 crc kubenswrapper[4783]: I1002 11:19:05.935692 4783 util.go:30] "No sandbox for pod can be found. 
Oct 02 11:19:05 crc kubenswrapper[4783]: I1002 11:19:05.935692 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Oct 02 11:19:05 crc kubenswrapper[4783]: I1002 11:19:05.938321 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data"
Oct 02 11:19:05 crc kubenswrapper[4783]: I1002 11:19:05.945563 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Oct 02 11:19:06 crc kubenswrapper[4783]: I1002 11:19:06.071696 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7db565dd-9936-429f-8842-9c484199e519-config-data\") pod \"nova-scheduler-0\" (UID: \"7db565dd-9936-429f-8842-9c484199e519\") " pod="openstack/nova-scheduler-0"
Oct 02 11:19:06 crc kubenswrapper[4783]: I1002 11:19:06.072049 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tc7v9\" (UniqueName: \"kubernetes.io/projected/7db565dd-9936-429f-8842-9c484199e519-kube-api-access-tc7v9\") pod \"nova-scheduler-0\" (UID: \"7db565dd-9936-429f-8842-9c484199e519\") " pod="openstack/nova-scheduler-0"
Oct 02 11:19:06 crc kubenswrapper[4783]: I1002 11:19:06.072245 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7db565dd-9936-429f-8842-9c484199e519-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"7db565dd-9936-429f-8842-9c484199e519\") " pod="openstack/nova-scheduler-0"
Oct 02 11:19:06 crc kubenswrapper[4783]: I1002 11:19:06.173951 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tc7v9\" (UniqueName: \"kubernetes.io/projected/7db565dd-9936-429f-8842-9c484199e519-kube-api-access-tc7v9\") pod \"nova-scheduler-0\" (UID: \"7db565dd-9936-429f-8842-9c484199e519\") " pod="openstack/nova-scheduler-0"
Oct 02 11:19:06 crc kubenswrapper[4783]: I1002 11:19:06.174104 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7db565dd-9936-429f-8842-9c484199e519-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"7db565dd-9936-429f-8842-9c484199e519\") " pod="openstack/nova-scheduler-0"
Oct 02 11:19:06 crc kubenswrapper[4783]: I1002 11:19:06.174198 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7db565dd-9936-429f-8842-9c484199e519-config-data\") pod \"nova-scheduler-0\" (UID: \"7db565dd-9936-429f-8842-9c484199e519\") " pod="openstack/nova-scheduler-0"
Oct 02 11:19:06 crc kubenswrapper[4783]: I1002 11:19:06.179789 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7db565dd-9936-429f-8842-9c484199e519-config-data\") pod \"nova-scheduler-0\" (UID: \"7db565dd-9936-429f-8842-9c484199e519\") " pod="openstack/nova-scheduler-0"
Oct 02 11:19:06 crc kubenswrapper[4783]: I1002 11:19:06.179977 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7db565dd-9936-429f-8842-9c484199e519-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"7db565dd-9936-429f-8842-9c484199e519\") " pod="openstack/nova-scheduler-0"
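
Each volume above marches through the same three reconciler stages: VerifyControllerAttachedVolume (reconciler_common.go:245), MountVolume started (reconciler_common.go:218), then MountVolume.SetUp succeeded (operation_generator.go:637). A compact Go model of that desired-state walk, with stubs standing in for the real operation executor and plugin SetUp calls:

package main

import "fmt"

type volume struct{ name, plugin string }

// mountAll mirrors the stage ordering visible in the log; each print
// is a stand-in for the corresponding operation executor step.
func mountAll(pod string, vols []volume) {
	for _, v := range vols {
		fmt.Printf("VerifyControllerAttachedVolume started for %q pod=%s\n", v.name, pod)
	}
	for _, v := range vols {
		fmt.Printf("MountVolume started for %q (%s)\n", v.name, v.plugin)
		// the plugin's SetUp would run here; on success:
		fmt.Printf("MountVolume.SetUp succeeded for %q\n", v.name)
	}
}

func main() {
	mountAll("openstack/nova-scheduler-0", []volume{
		{"config-data", "kubernetes.io/secret"},
		{"kube-api-access-tc7v9", "kubernetes.io/projected"},
		{"combined-ca-bundle", "kubernetes.io/secret"},
	})
}
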
\"kubernetes.io/projected/7db565dd-9936-429f-8842-9c484199e519-kube-api-access-tc7v9\") pod \"nova-scheduler-0\" (UID: \"7db565dd-9936-429f-8842-9c484199e519\") " pod="openstack/nova-scheduler-0" Oct 02 11:19:06 crc kubenswrapper[4783]: I1002 11:19:06.251222 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Oct 02 11:19:06 crc kubenswrapper[4783]: I1002 11:19:06.683908 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Oct 02 11:19:06 crc kubenswrapper[4783]: W1002 11:19:06.692277 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod7db565dd_9936_429f_8842_9c484199e519.slice/crio-7f56c1000314ba83d6ba39c2a6fc32ed04f1cdb382dfa5ec22dbb0eadbdcafba WatchSource:0}: Error finding container 7f56c1000314ba83d6ba39c2a6fc32ed04f1cdb382dfa5ec22dbb0eadbdcafba: Status 404 returned error can't find the container with id 7f56c1000314ba83d6ba39c2a6fc32ed04f1cdb382dfa5ec22dbb0eadbdcafba Oct 02 11:19:06 crc kubenswrapper[4783]: I1002 11:19:06.873946 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"7db565dd-9936-429f-8842-9c484199e519","Type":"ContainerStarted","Data":"7f56c1000314ba83d6ba39c2a6fc32ed04f1cdb382dfa5ec22dbb0eadbdcafba"} Oct 02 11:19:07 crc kubenswrapper[4783]: I1002 11:19:07.569085 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e89f85ef-910d-4b8f-8273-a59e2435f4e4" path="/var/lib/kubelet/pods/e89f85ef-910d-4b8f-8273-a59e2435f4e4/volumes" Oct 02 11:19:07 crc kubenswrapper[4783]: I1002 11:19:07.882308 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"7db565dd-9936-429f-8842-9c484199e519","Type":"ContainerStarted","Data":"952201f97170107a154b58d4bc17981f5ce9eb0e6e76a5613c45fadc7d02435a"} Oct 02 11:19:09 crc kubenswrapper[4783]: I1002 11:19:09.226399 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Oct 02 11:19:09 crc kubenswrapper[4783]: I1002 11:19:09.226458 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Oct 02 11:19:11 crc kubenswrapper[4783]: I1002 11:19:11.251591 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Oct 02 11:19:12 crc kubenswrapper[4783]: I1002 11:19:12.180937 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Oct 02 11:19:12 crc kubenswrapper[4783]: I1002 11:19:12.180978 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Oct 02 11:19:13 crc kubenswrapper[4783]: I1002 11:19:13.196793 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="e8bf2ea3-c311-4aa8-becd-9b6b38974a65" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.206:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 02 11:19:13 crc kubenswrapper[4783]: I1002 11:19:13.196793 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="e8bf2ea3-c311-4aa8-becd-9b6b38974a65" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.206:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 02 11:19:14 crc kubenswrapper[4783]: I1002 11:19:14.226308 4783 kubelet.go:2542] 
"SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Oct 02 11:19:14 crc kubenswrapper[4783]: I1002 11:19:14.226778 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Oct 02 11:19:15 crc kubenswrapper[4783]: I1002 11:19:15.242637 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="ec427546-ed98-4091-93e6-563f539a9d69" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.207:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 02 11:19:15 crc kubenswrapper[4783]: I1002 11:19:15.242668 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="ec427546-ed98-4091-93e6-563f539a9d69" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.207:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 02 11:19:16 crc kubenswrapper[4783]: I1002 11:19:16.251662 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Oct 02 11:19:16 crc kubenswrapper[4783]: I1002 11:19:16.287675 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Oct 02 11:19:16 crc kubenswrapper[4783]: I1002 11:19:16.313551 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=11.313530658 podStartE2EDuration="11.313530658s" podCreationTimestamp="2025-10-02 11:19:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:19:07.902795475 +0000 UTC m=+1581.218989736" watchObservedRunningTime="2025-10-02 11:19:16.313530658 +0000 UTC m=+1589.629724909" Oct 02 11:19:16 crc kubenswrapper[4783]: I1002 11:19:16.544952 4783 scope.go:117] "RemoveContainer" containerID="a4f4b50638a10c4b177b4afe187ab7d697078645bdbd553938b22e713769fb04" Oct 02 11:19:16 crc kubenswrapper[4783]: E1002 11:19:16.545509 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:19:17 crc kubenswrapper[4783]: I1002 11:19:17.091452 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Oct 02 11:19:22 crc kubenswrapper[4783]: I1002 11:19:22.189407 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Oct 02 11:19:22 crc kubenswrapper[4783]: I1002 11:19:22.191154 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Oct 02 11:19:22 crc kubenswrapper[4783]: I1002 11:19:22.192308 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Oct 02 11:19:22 crc kubenswrapper[4783]: I1002 11:19:22.197912 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Oct 02 11:19:22 crc kubenswrapper[4783]: I1002 11:19:22.323435 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack/ceilometer-0" Oct 02 11:19:23 crc kubenswrapper[4783]: I1002 11:19:23.020327 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Oct 02 11:19:23 crc kubenswrapper[4783]: I1002 11:19:23.027592 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Oct 02 11:19:24 crc kubenswrapper[4783]: I1002 11:19:24.238807 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Oct 02 11:19:24 crc kubenswrapper[4783]: I1002 11:19:24.239837 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Oct 02 11:19:24 crc kubenswrapper[4783]: I1002 11:19:24.243875 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Oct 02 11:19:25 crc kubenswrapper[4783]: I1002 11:19:25.044868 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Oct 02 11:19:29 crc kubenswrapper[4783]: I1002 11:19:29.545263 4783 scope.go:117] "RemoveContainer" containerID="a4f4b50638a10c4b177b4afe187ab7d697078645bdbd553938b22e713769fb04" Oct 02 11:19:29 crc kubenswrapper[4783]: E1002 11:19:29.546171 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:19:33 crc kubenswrapper[4783]: I1002 11:19:33.008222 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Oct 02 11:19:33 crc kubenswrapper[4783]: I1002 11:19:33.686753 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Oct 02 11:19:37 crc kubenswrapper[4783]: I1002 11:19:37.647724 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="3b3b5c94-1a3b-4486-9247-724deab20d81" containerName="rabbitmq" containerID="cri-o://d7e202d124dcc4b1c7dd1a3016a61b09bb5458dcdfdcabaade45a085cb64a32c" gracePeriod=604797 Oct 02 11:19:37 crc kubenswrapper[4783]: I1002 11:19:37.776361 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-cell1-server-0" podUID="3b3b5c94-1a3b-4486-9247-724deab20d81" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.98:5671: connect: connection refused" Oct 02 11:19:38 crc kubenswrapper[4783]: I1002 11:19:38.150317 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c" containerName="rabbitmq" containerID="cri-o://6c7e174a6041901be5e2d41be77720ba19404622023eeedf0f9f60bf7700606a" gracePeriod=604795 Oct 02 11:19:38 crc kubenswrapper[4783]: I1002 11:19:38.333369 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/rabbitmq-server-0" podUID="3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c" containerName="rabbitmq" probeResult="failure" output="dial tcp 10.217.0.99:5671: connect: connection refused" Oct 02 11:19:40 crc kubenswrapper[4783]: I1002 11:19:40.545604 4783 scope.go:117] "RemoveContainer" containerID="a4f4b50638a10c4b177b4afe187ab7d697078645bdbd553938b22e713769fb04" Oct 02 11:19:40 crc 
Oct 02 11:19:40 crc kubenswrapper[4783]: E1002 11:19:40.546271 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.198721 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.222708 4783 generic.go:334] "Generic (PLEG): container finished" podID="3b3b5c94-1a3b-4486-9247-724deab20d81" containerID="d7e202d124dcc4b1c7dd1a3016a61b09bb5458dcdfdcabaade45a085cb64a32c" exitCode=0
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.222755 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"3b3b5c94-1a3b-4486-9247-724deab20d81","Type":"ContainerDied","Data":"d7e202d124dcc4b1c7dd1a3016a61b09bb5458dcdfdcabaade45a085cb64a32c"}
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.222789 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"3b3b5c94-1a3b-4486-9247-724deab20d81","Type":"ContainerDied","Data":"e9ed8dab9d29705b02dc4d225d69c76075ffd3e4e1d2155d74ce90bdab21ab64"}
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.222805 4783 scope.go:117] "RemoveContainer" containerID="d7e202d124dcc4b1c7dd1a3016a61b09bb5458dcdfdcabaade45a085cb64a32c"
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.222820 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.249274 4783 scope.go:117] "RemoveContainer" containerID="4ad86dd5b5b6bbeabc1223e6afe4135532d1280634ed21d294d8ba134487df58"
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.297461 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/3b3b5c94-1a3b-4486-9247-724deab20d81-rabbitmq-plugins\") pod \"3b3b5c94-1a3b-4486-9247-724deab20d81\" (UID: \"3b3b5c94-1a3b-4486-9247-724deab20d81\") "
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.297564 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3b3b5c94-1a3b-4486-9247-724deab20d81-config-data\") pod \"3b3b5c94-1a3b-4486-9247-724deab20d81\" (UID: \"3b3b5c94-1a3b-4486-9247-724deab20d81\") "
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.297596 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/3b3b5c94-1a3b-4486-9247-724deab20d81-server-conf\") pod \"3b3b5c94-1a3b-4486-9247-724deab20d81\" (UID: \"3b3b5c94-1a3b-4486-9247-724deab20d81\") "
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.298913 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/3b3b5c94-1a3b-4486-9247-724deab20d81-rabbitmq-confd\") pod \"3b3b5c94-1a3b-4486-9247-724deab20d81\" (UID: \"3b3b5c94-1a3b-4486-9247-724deab20d81\") "
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.299004 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/3b3b5c94-1a3b-4486-9247-724deab20d81-pod-info\") pod \"3b3b5c94-1a3b-4486-9247-724deab20d81\" (UID: \"3b3b5c94-1a3b-4486-9247-724deab20d81\") "
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.299081 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/3b3b5c94-1a3b-4486-9247-724deab20d81-rabbitmq-tls\") pod \"3b3b5c94-1a3b-4486-9247-724deab20d81\" (UID: \"3b3b5c94-1a3b-4486-9247-724deab20d81\") "
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.299146 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"3b3b5c94-1a3b-4486-9247-724deab20d81\" (UID: \"3b3b5c94-1a3b-4486-9247-724deab20d81\") "
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.299185 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nvdtg\" (UniqueName: \"kubernetes.io/projected/3b3b5c94-1a3b-4486-9247-724deab20d81-kube-api-access-nvdtg\") pod \"3b3b5c94-1a3b-4486-9247-724deab20d81\" (UID: \"3b3b5c94-1a3b-4486-9247-724deab20d81\") "
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.299241 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/3b3b5c94-1a3b-4486-9247-724deab20d81-plugins-conf\") pod \"3b3b5c94-1a3b-4486-9247-724deab20d81\" (UID: \"3b3b5c94-1a3b-4486-9247-724deab20d81\") "
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.299287 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/3b3b5c94-1a3b-4486-9247-724deab20d81-erlang-cookie-secret\") pod \"3b3b5c94-1a3b-4486-9247-724deab20d81\" (UID: \"3b3b5c94-1a3b-4486-9247-724deab20d81\") "
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.299321 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/3b3b5c94-1a3b-4486-9247-724deab20d81-rabbitmq-erlang-cookie\") pod \"3b3b5c94-1a3b-4486-9247-724deab20d81\" (UID: \"3b3b5c94-1a3b-4486-9247-724deab20d81\") "
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.301994 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3b3b5c94-1a3b-4486-9247-724deab20d81-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "3b3b5c94-1a3b-4486-9247-724deab20d81" (UID: "3b3b5c94-1a3b-4486-9247-724deab20d81"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.303757 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3b3b5c94-1a3b-4486-9247-724deab20d81-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "3b3b5c94-1a3b-4486-9247-724deab20d81" (UID: "3b3b5c94-1a3b-4486-9247-724deab20d81"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.304023 4783 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/3b3b5c94-1a3b-4486-9247-724deab20d81-rabbitmq-plugins\") on node \"crc\" DevicePath \"\""
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.304054 4783 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/3b3b5c94-1a3b-4486-9247-724deab20d81-plugins-conf\") on node \"crc\" DevicePath \"\""
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.315354 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3b3b5c94-1a3b-4486-9247-724deab20d81-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "3b3b5c94-1a3b-4486-9247-724deab20d81" (UID: "3b3b5c94-1a3b-4486-9247-724deab20d81"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.316206 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3b3b5c94-1a3b-4486-9247-724deab20d81-kube-api-access-nvdtg" (OuterVolumeSpecName: "kube-api-access-nvdtg") pod "3b3b5c94-1a3b-4486-9247-724deab20d81" (UID: "3b3b5c94-1a3b-4486-9247-724deab20d81"). InnerVolumeSpecName "kube-api-access-nvdtg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.316686 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3b3b5c94-1a3b-4486-9247-724deab20d81-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "3b3b5c94-1a3b-4486-9247-724deab20d81" (UID: "3b3b5c94-1a3b-4486-9247-724deab20d81"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.318148 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/3b3b5c94-1a3b-4486-9247-724deab20d81-pod-info" (OuterVolumeSpecName: "pod-info") pod "3b3b5c94-1a3b-4486-9247-724deab20d81" (UID: "3b3b5c94-1a3b-4486-9247-724deab20d81"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue ""
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.318646 4783 scope.go:117] "RemoveContainer" containerID="d7e202d124dcc4b1c7dd1a3016a61b09bb5458dcdfdcabaade45a085cb64a32c"
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.318854 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "persistence") pod "3b3b5c94-1a3b-4486-9247-724deab20d81" (UID: "3b3b5c94-1a3b-4486-9247-724deab20d81"). InnerVolumeSpecName "local-storage05-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Oct 02 11:19:44 crc kubenswrapper[4783]: E1002 11:19:44.321188 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d7e202d124dcc4b1c7dd1a3016a61b09bb5458dcdfdcabaade45a085cb64a32c\": container with ID starting with d7e202d124dcc4b1c7dd1a3016a61b09bb5458dcdfdcabaade45a085cb64a32c not found: ID does not exist" containerID="d7e202d124dcc4b1c7dd1a3016a61b09bb5458dcdfdcabaade45a085cb64a32c"
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.321227 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d7e202d124dcc4b1c7dd1a3016a61b09bb5458dcdfdcabaade45a085cb64a32c"} err="failed to get container status \"d7e202d124dcc4b1c7dd1a3016a61b09bb5458dcdfdcabaade45a085cb64a32c\": rpc error: code = NotFound desc = could not find container \"d7e202d124dcc4b1c7dd1a3016a61b09bb5458dcdfdcabaade45a085cb64a32c\": container with ID starting with d7e202d124dcc4b1c7dd1a3016a61b09bb5458dcdfdcabaade45a085cb64a32c not found: ID does not exist"
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.321256 4783 scope.go:117] "RemoveContainer" containerID="4ad86dd5b5b6bbeabc1223e6afe4135532d1280634ed21d294d8ba134487df58"
Oct 02 11:19:44 crc kubenswrapper[4783]: E1002 11:19:44.321631 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4ad86dd5b5b6bbeabc1223e6afe4135532d1280634ed21d294d8ba134487df58\": container with ID starting with 4ad86dd5b5b6bbeabc1223e6afe4135532d1280634ed21d294d8ba134487df58 not found: ID does not exist" containerID="4ad86dd5b5b6bbeabc1223e6afe4135532d1280634ed21d294d8ba134487df58"
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.321653 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ad86dd5b5b6bbeabc1223e6afe4135532d1280634ed21d294d8ba134487df58"} err="failed to get container status \"4ad86dd5b5b6bbeabc1223e6afe4135532d1280634ed21d294d8ba134487df58\": rpc error: code = NotFound desc = could not find container \"4ad86dd5b5b6bbeabc1223e6afe4135532d1280634ed21d294d8ba134487df58\": container with ID starting with 4ad86dd5b5b6bbeabc1223e6afe4135532d1280634ed21d294d8ba134487df58 not found: ID does not exist"
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.325427 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3b3b5c94-1a3b-4486-9247-724deab20d81-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "3b3b5c94-1a3b-4486-9247-724deab20d81" (UID: "3b3b5c94-1a3b-4486-9247-724deab20d81"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.356392 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3b3b5c94-1a3b-4486-9247-724deab20d81-config-data" (OuterVolumeSpecName: "config-data") pod "3b3b5c94-1a3b-4486-9247-724deab20d81" (UID: "3b3b5c94-1a3b-4486-9247-724deab20d81"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.394331 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3b3b5c94-1a3b-4486-9247-724deab20d81-server-conf" (OuterVolumeSpecName: "server-conf") pod "3b3b5c94-1a3b-4486-9247-724deab20d81" (UID: "3b3b5c94-1a3b-4486-9247-724deab20d81"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.406558 4783 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3b3b5c94-1a3b-4486-9247-724deab20d81-config-data\") on node \"crc\" DevicePath \"\""
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.406588 4783 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/3b3b5c94-1a3b-4486-9247-724deab20d81-server-conf\") on node \"crc\" DevicePath \"\""
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.406605 4783 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/3b3b5c94-1a3b-4486-9247-724deab20d81-pod-info\") on node \"crc\" DevicePath \"\""
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.406614 4783 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/3b3b5c94-1a3b-4486-9247-724deab20d81-rabbitmq-tls\") on node \"crc\" DevicePath \"\""
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.406643 4783 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" "
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.406655 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nvdtg\" (UniqueName: \"kubernetes.io/projected/3b3b5c94-1a3b-4486-9247-724deab20d81-kube-api-access-nvdtg\") on node \"crc\" DevicePath \"\""
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.406663 4783 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/3b3b5c94-1a3b-4486-9247-724deab20d81-erlang-cookie-secret\") on node \"crc\" DevicePath \"\""
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.406671 4783 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/3b3b5c94-1a3b-4486-9247-724deab20d81-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\""
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.433980 4783 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc"
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.464243 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3b3b5c94-1a3b-4486-9247-724deab20d81-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "3b3b5c94-1a3b-4486-9247-724deab20d81" (UID: "3b3b5c94-1a3b-4486-9247-724deab20d81"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.509337 4783 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\""
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.509380 4783 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/3b3b5c94-1a3b-4486-9247-724deab20d81-rabbitmq-confd\") on node \"crc\" DevicePath \"\""
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.586557 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.599442 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.622678 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Oct 02 11:19:44 crc kubenswrapper[4783]: E1002 11:19:44.623597 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3b3b5c94-1a3b-4486-9247-724deab20d81" containerName="setup-container"
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.623626 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b3b5c94-1a3b-4486-9247-724deab20d81" containerName="setup-container"
Oct 02 11:19:44 crc kubenswrapper[4783]: E1002 11:19:44.623706 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3b3b5c94-1a3b-4486-9247-724deab20d81" containerName="rabbitmq"
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.623724 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="3b3b5c94-1a3b-4486-9247-724deab20d81" containerName="rabbitmq"
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.627347 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="3b3b5c94-1a3b-4486-9247-724deab20d81" containerName="rabbitmq"
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.628604 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0"
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.631749 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data"
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.632060 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc"
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.632343 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-mb4cz"
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.633646 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user"
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.633985 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf"
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.634259 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie"
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.634668 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf"
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.638047 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.677301 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.712180 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\" (UID: \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\") "
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.712492 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-rabbitmq-erlang-cookie\") pod \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\" (UID: \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\") "
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.712556 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-rabbitmq-confd\") pod \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\" (UID: \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\") "
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.712589 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-plugins-conf\") pod \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\" (UID: \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\") "
Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.712655 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-rabbitmq-tls\") pod \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\" (UID: \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\") "
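
"Caches populated for *v1.Secret/*v1.ConfigMap" marks the kubelet's reflectors completing their initial LIST for each object the new pod references. The same list-and-sync handshake can be reproduced with a client-go informer; a hedged sketch assuming in-cluster credentials, the openstack namespace, and a go.mod pulling in k8s.io/client-go:

package main

import (
	"fmt"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
)

func main() {
	cfg, err := rest.InClusterConfig() // assumes the process runs inside the cluster
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)
	factory := informers.NewSharedInformerFactoryWithOptions(
		client, 0, informers.WithNamespace("openstack"))
	secrets := factory.Core().V1().Secrets().Informer()

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)
	// Blocks until the initial LIST is in the local cache -- the moment
	// the kubelet would log "Caches populated".
	cache.WaitForCacheSync(stop, secrets.HasSynced)
	fmt.Println("secret cache synced")
}
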
\"kubernetes.io/secret/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-erlang-cookie-secret\") pod \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\" (UID: \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\") " Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.712710 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-config-data\") pod \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\" (UID: \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\") " Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.712730 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-pod-info\") pod \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\" (UID: \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\") " Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.712775 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hxxkz\" (UniqueName: \"kubernetes.io/projected/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-kube-api-access-hxxkz\") pod \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\" (UID: \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\") " Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.712819 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-server-conf\") pod \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\" (UID: \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\") " Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.712876 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-rabbitmq-plugins\") pod \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\" (UID: \"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c\") " Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.713190 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/360fdfae-913f-4be9-985b-26101d3dfb3b-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"360fdfae-913f-4be9-985b-26101d3dfb3b\") " pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.713228 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/360fdfae-913f-4be9-985b-26101d3dfb3b-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"360fdfae-913f-4be9-985b-26101d3dfb3b\") " pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.713250 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/360fdfae-913f-4be9-985b-26101d3dfb3b-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"360fdfae-913f-4be9-985b-26101d3dfb3b\") " pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.713272 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"360fdfae-913f-4be9-985b-26101d3dfb3b\") " pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 
11:19:44.713293 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/360fdfae-913f-4be9-985b-26101d3dfb3b-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"360fdfae-913f-4be9-985b-26101d3dfb3b\") " pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.713321 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/360fdfae-913f-4be9-985b-26101d3dfb3b-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"360fdfae-913f-4be9-985b-26101d3dfb3b\") " pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.713337 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6k4rh\" (UniqueName: \"kubernetes.io/projected/360fdfae-913f-4be9-985b-26101d3dfb3b-kube-api-access-6k4rh\") pod \"rabbitmq-cell1-server-0\" (UID: \"360fdfae-913f-4be9-985b-26101d3dfb3b\") " pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.713365 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/360fdfae-913f-4be9-985b-26101d3dfb3b-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"360fdfae-913f-4be9-985b-26101d3dfb3b\") " pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.713384 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/360fdfae-913f-4be9-985b-26101d3dfb3b-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"360fdfae-913f-4be9-985b-26101d3dfb3b\") " pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.713402 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/360fdfae-913f-4be9-985b-26101d3dfb3b-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"360fdfae-913f-4be9-985b-26101d3dfb3b\") " pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.713454 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/360fdfae-913f-4be9-985b-26101d3dfb3b-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"360fdfae-913f-4be9-985b-26101d3dfb3b\") " pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.716431 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c" (UID: "3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.721773 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-pod-info" (OuterVolumeSpecName: "pod-info") pod "3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c" (UID: "3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c"). 
InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.723764 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-kube-api-access-hxxkz" (OuterVolumeSpecName: "kube-api-access-hxxkz") pod "3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c" (UID: "3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c"). InnerVolumeSpecName "kube-api-access-hxxkz". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.729771 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c" (UID: "3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.731593 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage08-crc" (OuterVolumeSpecName: "persistence") pod "3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c" (UID: "3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c"). InnerVolumeSpecName "local-storage08-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.737000 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c" (UID: "3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.740983 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c" (UID: "3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.748447 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c" (UID: "3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.766342 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-config-data" (OuterVolumeSpecName: "config-data") pod "3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c" (UID: "3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.794652 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-server-conf" (OuterVolumeSpecName: "server-conf") pod "3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c" (UID: "3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c"). 
InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.815383 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/360fdfae-913f-4be9-985b-26101d3dfb3b-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"360fdfae-913f-4be9-985b-26101d3dfb3b\") " pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.815460 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/360fdfae-913f-4be9-985b-26101d3dfb3b-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"360fdfae-913f-4be9-985b-26101d3dfb3b\") " pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.815476 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6k4rh\" (UniqueName: \"kubernetes.io/projected/360fdfae-913f-4be9-985b-26101d3dfb3b-kube-api-access-6k4rh\") pod \"rabbitmq-cell1-server-0\" (UID: \"360fdfae-913f-4be9-985b-26101d3dfb3b\") " pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.815509 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/360fdfae-913f-4be9-985b-26101d3dfb3b-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"360fdfae-913f-4be9-985b-26101d3dfb3b\") " pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.815531 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/360fdfae-913f-4be9-985b-26101d3dfb3b-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"360fdfae-913f-4be9-985b-26101d3dfb3b\") " pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.815550 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/360fdfae-913f-4be9-985b-26101d3dfb3b-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"360fdfae-913f-4be9-985b-26101d3dfb3b\") " pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.815571 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/360fdfae-913f-4be9-985b-26101d3dfb3b-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"360fdfae-913f-4be9-985b-26101d3dfb3b\") " pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.815647 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/360fdfae-913f-4be9-985b-26101d3dfb3b-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"360fdfae-913f-4be9-985b-26101d3dfb3b\") " pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.816200 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/360fdfae-913f-4be9-985b-26101d3dfb3b-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"360fdfae-913f-4be9-985b-26101d3dfb3b\") " pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:19:44 crc 
kubenswrapper[4783]: I1002 11:19:44.816277 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/360fdfae-913f-4be9-985b-26101d3dfb3b-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"360fdfae-913f-4be9-985b-26101d3dfb3b\") " pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.816327 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"360fdfae-913f-4be9-985b-26101d3dfb3b\") " pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.816584 4783 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" " Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.816613 4783 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.816630 4783 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-plugins-conf\") on node \"crc\" DevicePath \"\"" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.816642 4783 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.816656 4783 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.816668 4783 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-config-data\") on node \"crc\" DevicePath \"\"" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.816680 4783 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-pod-info\") on node \"crc\" DevicePath \"\"" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.816692 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hxxkz\" (UniqueName: \"kubernetes.io/projected/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-kube-api-access-hxxkz\") on node \"crc\" DevicePath \"\"" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.816704 4783 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-server-conf\") on node \"crc\" DevicePath \"\"" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.816716 4783 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.817798 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" 
(UniqueName: \"kubernetes.io/empty-dir/360fdfae-913f-4be9-985b-26101d3dfb3b-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"360fdfae-913f-4be9-985b-26101d3dfb3b\") " pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.819127 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/360fdfae-913f-4be9-985b-26101d3dfb3b-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"360fdfae-913f-4be9-985b-26101d3dfb3b\") " pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.819599 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/360fdfae-913f-4be9-985b-26101d3dfb3b-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"360fdfae-913f-4be9-985b-26101d3dfb3b\") " pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.820197 4783 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"360fdfae-913f-4be9-985b-26101d3dfb3b\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.820825 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/360fdfae-913f-4be9-985b-26101d3dfb3b-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"360fdfae-913f-4be9-985b-26101d3dfb3b\") " pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.820897 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/360fdfae-913f-4be9-985b-26101d3dfb3b-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"360fdfae-913f-4be9-985b-26101d3dfb3b\") " pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.823627 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/360fdfae-913f-4be9-985b-26101d3dfb3b-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"360fdfae-913f-4be9-985b-26101d3dfb3b\") " pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.830771 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/360fdfae-913f-4be9-985b-26101d3dfb3b-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"360fdfae-913f-4be9-985b-26101d3dfb3b\") " pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.846066 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/360fdfae-913f-4be9-985b-26101d3dfb3b-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"360fdfae-913f-4be9-985b-26101d3dfb3b\") " pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.846165 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/360fdfae-913f-4be9-985b-26101d3dfb3b-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"360fdfae-913f-4be9-985b-26101d3dfb3b\") " 
pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.862833 4783 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage08-crc" (UniqueName: "kubernetes.io/local-volume/local-storage08-crc") on node "crc" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.864580 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6k4rh\" (UniqueName: \"kubernetes.io/projected/360fdfae-913f-4be9-985b-26101d3dfb3b-kube-api-access-6k4rh\") pod \"rabbitmq-cell1-server-0\" (UID: \"360fdfae-913f-4be9-985b-26101d3dfb3b\") " pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.896668 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"360fdfae-913f-4be9-985b-26101d3dfb3b\") " pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.924577 4783 reconciler_common.go:293] "Volume detached for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") on node \"crc\" DevicePath \"\"" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.933441 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c" (UID: "3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c"). InnerVolumeSpecName "rabbitmq-confd". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:19:44 crc kubenswrapper[4783]: I1002 11:19:44.960327 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.026231 4783 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.235798 4783 generic.go:334] "Generic (PLEG): container finished" podID="3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c" containerID="6c7e174a6041901be5e2d41be77720ba19404622023eeedf0f9f60bf7700606a" exitCode=0 Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.236061 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c","Type":"ContainerDied","Data":"6c7e174a6041901be5e2d41be77720ba19404622023eeedf0f9f60bf7700606a"} Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.236085 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c","Type":"ContainerDied","Data":"acae24ec679a88645e8aecf4c76a519ee80a4c31a10a590fb4b09c36af990cdc"} Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.236102 4783 scope.go:117] "RemoveContainer" containerID="6c7e174a6041901be5e2d41be77720ba19404622023eeedf0f9f60bf7700606a" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.236227 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.272071 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.272221 4783 scope.go:117] "RemoveContainer" containerID="aab927f4dae4ca46598a4e07540a8b726e4cb9bda96cba6d1674271516631011" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.281015 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.311264 4783 scope.go:117] "RemoveContainer" containerID="6c7e174a6041901be5e2d41be77720ba19404622023eeedf0f9f60bf7700606a" Oct 02 11:19:45 crc kubenswrapper[4783]: E1002 11:19:45.311748 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6c7e174a6041901be5e2d41be77720ba19404622023eeedf0f9f60bf7700606a\": container with ID starting with 6c7e174a6041901be5e2d41be77720ba19404622023eeedf0f9f60bf7700606a not found: ID does not exist" containerID="6c7e174a6041901be5e2d41be77720ba19404622023eeedf0f9f60bf7700606a" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.311803 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6c7e174a6041901be5e2d41be77720ba19404622023eeedf0f9f60bf7700606a"} err="failed to get container status \"6c7e174a6041901be5e2d41be77720ba19404622023eeedf0f9f60bf7700606a\": rpc error: code = NotFound desc = could not find container \"6c7e174a6041901be5e2d41be77720ba19404622023eeedf0f9f60bf7700606a\": container with ID starting with 6c7e174a6041901be5e2d41be77720ba19404622023eeedf0f9f60bf7700606a not found: ID does not exist" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.311836 4783 scope.go:117] "RemoveContainer" containerID="aab927f4dae4ca46598a4e07540a8b726e4cb9bda96cba6d1674271516631011" Oct 02 11:19:45 crc kubenswrapper[4783]: E1002 11:19:45.312187 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aab927f4dae4ca46598a4e07540a8b726e4cb9bda96cba6d1674271516631011\": container with ID starting with aab927f4dae4ca46598a4e07540a8b726e4cb9bda96cba6d1674271516631011 not found: ID does not exist" containerID="aab927f4dae4ca46598a4e07540a8b726e4cb9bda96cba6d1674271516631011" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.312220 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aab927f4dae4ca46598a4e07540a8b726e4cb9bda96cba6d1674271516631011"} err="failed to get container status \"aab927f4dae4ca46598a4e07540a8b726e4cb9bda96cba6d1674271516631011\": rpc error: code = NotFound desc = could not find container \"aab927f4dae4ca46598a4e07540a8b726e4cb9bda96cba6d1674271516631011\": container with ID starting with aab927f4dae4ca46598a4e07540a8b726e4cb9bda96cba6d1674271516631011 not found: ID does not exist" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.335263 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"] Oct 02 11:19:45 crc kubenswrapper[4783]: E1002 11:19:45.335698 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c" containerName="setup-container" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.335718 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c" 
containerName="setup-container" Oct 02 11:19:45 crc kubenswrapper[4783]: E1002 11:19:45.335732 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c" containerName="rabbitmq" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.335741 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c" containerName="rabbitmq" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.335924 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c" containerName="rabbitmq" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.336888 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.339345 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.339587 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.339731 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.339904 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.340021 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.340128 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.340255 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-9br7t" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.343568 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.417910 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.434932 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6-config-data\") pod \"rabbitmq-server-0\" (UID: \"7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6\") " pod="openstack/rabbitmq-server-0" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.434973 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6\") " pod="openstack/rabbitmq-server-0" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.435030 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6\") " pod="openstack/rabbitmq-server-0" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.435664 4783 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6-pod-info\") pod \"rabbitmq-server-0\" (UID: \"7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6\") " pod="openstack/rabbitmq-server-0" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.435700 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6-server-conf\") pod \"rabbitmq-server-0\" (UID: \"7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6\") " pod="openstack/rabbitmq-server-0" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.435770 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nwc6q\" (UniqueName: \"kubernetes.io/projected/7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6-kube-api-access-nwc6q\") pod \"rabbitmq-server-0\" (UID: \"7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6\") " pod="openstack/rabbitmq-server-0" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.435883 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6\") " pod="openstack/rabbitmq-server-0" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.435920 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"rabbitmq-server-0\" (UID: \"7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6\") " pod="openstack/rabbitmq-server-0" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.436030 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6\") " pod="openstack/rabbitmq-server-0" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.436123 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6\") " pod="openstack/rabbitmq-server-0" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.436206 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6\") " pod="openstack/rabbitmq-server-0" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.538626 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"rabbitmq-server-0\" (UID: \"7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6\") " pod="openstack/rabbitmq-server-0" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.538689 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: 
\"kubernetes.io/secret/7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6\") " pod="openstack/rabbitmq-server-0" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.538718 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6\") " pod="openstack/rabbitmq-server-0" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.538749 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6\") " pod="openstack/rabbitmq-server-0" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.538794 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6-config-data\") pod \"rabbitmq-server-0\" (UID: \"7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6\") " pod="openstack/rabbitmq-server-0" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.538951 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6\") " pod="openstack/rabbitmq-server-0" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.538976 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6\") " pod="openstack/rabbitmq-server-0" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.539012 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6-pod-info\") pod \"rabbitmq-server-0\" (UID: \"7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6\") " pod="openstack/rabbitmq-server-0" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.539028 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6-server-conf\") pod \"rabbitmq-server-0\" (UID: \"7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6\") " pod="openstack/rabbitmq-server-0" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.539045 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nwc6q\" (UniqueName: \"kubernetes.io/projected/7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6-kube-api-access-nwc6q\") pod \"rabbitmq-server-0\" (UID: \"7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6\") " pod="openstack/rabbitmq-server-0" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.539262 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6\") " pod="openstack/rabbitmq-server-0" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.539579 4783 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6\") " pod="openstack/rabbitmq-server-0" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.539867 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6-config-data\") pod \"rabbitmq-server-0\" (UID: \"7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6\") " pod="openstack/rabbitmq-server-0" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.539941 4783 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"rabbitmq-server-0\" (UID: \"7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6\") device mount path \"/mnt/openstack/pv08\"" pod="openstack/rabbitmq-server-0" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.540519 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6\") " pod="openstack/rabbitmq-server-0" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.540761 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6-server-conf\") pod \"rabbitmq-server-0\" (UID: \"7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6\") " pod="openstack/rabbitmq-server-0" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.540848 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6\") " pod="openstack/rabbitmq-server-0" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.543228 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6\") " pod="openstack/rabbitmq-server-0" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.543470 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6\") " pod="openstack/rabbitmq-server-0" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.546486 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6\") " pod="openstack/rabbitmq-server-0" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.552743 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6-pod-info\") pod \"rabbitmq-server-0\" (UID: \"7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6\") " 
pod="openstack/rabbitmq-server-0" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.559689 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c" path="/var/lib/kubelet/pods/3adb2893-4c59-4ba0-8c05-cdfa64dcfc7c/volumes" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.563047 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nwc6q\" (UniqueName: \"kubernetes.io/projected/7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6-kube-api-access-nwc6q\") pod \"rabbitmq-server-0\" (UID: \"7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6\") " pod="openstack/rabbitmq-server-0" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.564715 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3b3b5c94-1a3b-4486-9247-724deab20d81" path="/var/lib/kubelet/pods/3b3b5c94-1a3b-4486-9247-724deab20d81/volumes" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.574751 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage08-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage08-crc\") pod \"rabbitmq-server-0\" (UID: \"7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6\") " pod="openstack/rabbitmq-server-0" Oct 02 11:19:45 crc kubenswrapper[4783]: I1002 11:19:45.657511 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Oct 02 11:19:46 crc kubenswrapper[4783]: I1002 11:19:46.105917 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Oct 02 11:19:46 crc kubenswrapper[4783]: I1002 11:19:46.246129 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"360fdfae-913f-4be9-985b-26101d3dfb3b","Type":"ContainerStarted","Data":"be7e6c66aad7857588f42108d9be45542a69f2a41c81fda4d41166fc6692e9a5"} Oct 02 11:19:46 crc kubenswrapper[4783]: I1002 11:19:46.250201 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6","Type":"ContainerStarted","Data":"0fed783f1bf007c555f24feb4c2086c2dd66eceb942587aa8b77fc53f38cf3b3"} Oct 02 11:19:47 crc kubenswrapper[4783]: I1002 11:19:47.260796 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"360fdfae-913f-4be9-985b-26101d3dfb3b","Type":"ContainerStarted","Data":"25d5866b14ca7e7857c2a7f56b911d9cade1a65bf700dcf7f09ee16764d8dad4"} Oct 02 11:19:47 crc kubenswrapper[4783]: I1002 11:19:47.649932 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-67b789f86c-5t2rp"] Oct 02 11:19:47 crc kubenswrapper[4783]: I1002 11:19:47.652264 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-67b789f86c-5t2rp" Oct 02 11:19:47 crc kubenswrapper[4783]: I1002 11:19:47.657673 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-edpm-ipam" Oct 02 11:19:47 crc kubenswrapper[4783]: I1002 11:19:47.663243 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-67b789f86c-5t2rp"] Oct 02 11:19:47 crc kubenswrapper[4783]: I1002 11:19:47.783637 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/751e9a32-8be6-4bd8-877b-cbf6ea1d391b-config\") pod \"dnsmasq-dns-67b789f86c-5t2rp\" (UID: \"751e9a32-8be6-4bd8-877b-cbf6ea1d391b\") " pod="openstack/dnsmasq-dns-67b789f86c-5t2rp" Oct 02 11:19:47 crc kubenswrapper[4783]: I1002 11:19:47.783759 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/751e9a32-8be6-4bd8-877b-cbf6ea1d391b-ovsdbserver-sb\") pod \"dnsmasq-dns-67b789f86c-5t2rp\" (UID: \"751e9a32-8be6-4bd8-877b-cbf6ea1d391b\") " pod="openstack/dnsmasq-dns-67b789f86c-5t2rp" Oct 02 11:19:47 crc kubenswrapper[4783]: I1002 11:19:47.783801 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/751e9a32-8be6-4bd8-877b-cbf6ea1d391b-dns-svc\") pod \"dnsmasq-dns-67b789f86c-5t2rp\" (UID: \"751e9a32-8be6-4bd8-877b-cbf6ea1d391b\") " pod="openstack/dnsmasq-dns-67b789f86c-5t2rp" Oct 02 11:19:47 crc kubenswrapper[4783]: I1002 11:19:47.783868 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/751e9a32-8be6-4bd8-877b-cbf6ea1d391b-dns-swift-storage-0\") pod \"dnsmasq-dns-67b789f86c-5t2rp\" (UID: \"751e9a32-8be6-4bd8-877b-cbf6ea1d391b\") " pod="openstack/dnsmasq-dns-67b789f86c-5t2rp" Oct 02 11:19:47 crc kubenswrapper[4783]: I1002 11:19:47.783927 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/751e9a32-8be6-4bd8-877b-cbf6ea1d391b-ovsdbserver-nb\") pod \"dnsmasq-dns-67b789f86c-5t2rp\" (UID: \"751e9a32-8be6-4bd8-877b-cbf6ea1d391b\") " pod="openstack/dnsmasq-dns-67b789f86c-5t2rp" Oct 02 11:19:47 crc kubenswrapper[4783]: I1002 11:19:47.783944 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jbv8p\" (UniqueName: \"kubernetes.io/projected/751e9a32-8be6-4bd8-877b-cbf6ea1d391b-kube-api-access-jbv8p\") pod \"dnsmasq-dns-67b789f86c-5t2rp\" (UID: \"751e9a32-8be6-4bd8-877b-cbf6ea1d391b\") " pod="openstack/dnsmasq-dns-67b789f86c-5t2rp" Oct 02 11:19:47 crc kubenswrapper[4783]: I1002 11:19:47.783966 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/751e9a32-8be6-4bd8-877b-cbf6ea1d391b-openstack-edpm-ipam\") pod \"dnsmasq-dns-67b789f86c-5t2rp\" (UID: \"751e9a32-8be6-4bd8-877b-cbf6ea1d391b\") " pod="openstack/dnsmasq-dns-67b789f86c-5t2rp" Oct 02 11:19:47 crc kubenswrapper[4783]: I1002 11:19:47.885839 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/751e9a32-8be6-4bd8-877b-cbf6ea1d391b-ovsdbserver-nb\") pod \"dnsmasq-dns-67b789f86c-5t2rp\" 
(UID: \"751e9a32-8be6-4bd8-877b-cbf6ea1d391b\") " pod="openstack/dnsmasq-dns-67b789f86c-5t2rp" Oct 02 11:19:47 crc kubenswrapper[4783]: I1002 11:19:47.885895 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jbv8p\" (UniqueName: \"kubernetes.io/projected/751e9a32-8be6-4bd8-877b-cbf6ea1d391b-kube-api-access-jbv8p\") pod \"dnsmasq-dns-67b789f86c-5t2rp\" (UID: \"751e9a32-8be6-4bd8-877b-cbf6ea1d391b\") " pod="openstack/dnsmasq-dns-67b789f86c-5t2rp" Oct 02 11:19:47 crc kubenswrapper[4783]: I1002 11:19:47.885927 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/751e9a32-8be6-4bd8-877b-cbf6ea1d391b-openstack-edpm-ipam\") pod \"dnsmasq-dns-67b789f86c-5t2rp\" (UID: \"751e9a32-8be6-4bd8-877b-cbf6ea1d391b\") " pod="openstack/dnsmasq-dns-67b789f86c-5t2rp" Oct 02 11:19:47 crc kubenswrapper[4783]: I1002 11:19:47.885971 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/751e9a32-8be6-4bd8-877b-cbf6ea1d391b-config\") pod \"dnsmasq-dns-67b789f86c-5t2rp\" (UID: \"751e9a32-8be6-4bd8-877b-cbf6ea1d391b\") " pod="openstack/dnsmasq-dns-67b789f86c-5t2rp" Oct 02 11:19:47 crc kubenswrapper[4783]: I1002 11:19:47.886018 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/751e9a32-8be6-4bd8-877b-cbf6ea1d391b-ovsdbserver-sb\") pod \"dnsmasq-dns-67b789f86c-5t2rp\" (UID: \"751e9a32-8be6-4bd8-877b-cbf6ea1d391b\") " pod="openstack/dnsmasq-dns-67b789f86c-5t2rp" Oct 02 11:19:47 crc kubenswrapper[4783]: I1002 11:19:47.886046 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/751e9a32-8be6-4bd8-877b-cbf6ea1d391b-dns-svc\") pod \"dnsmasq-dns-67b789f86c-5t2rp\" (UID: \"751e9a32-8be6-4bd8-877b-cbf6ea1d391b\") " pod="openstack/dnsmasq-dns-67b789f86c-5t2rp" Oct 02 11:19:47 crc kubenswrapper[4783]: I1002 11:19:47.886108 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/751e9a32-8be6-4bd8-877b-cbf6ea1d391b-dns-swift-storage-0\") pod \"dnsmasq-dns-67b789f86c-5t2rp\" (UID: \"751e9a32-8be6-4bd8-877b-cbf6ea1d391b\") " pod="openstack/dnsmasq-dns-67b789f86c-5t2rp" Oct 02 11:19:47 crc kubenswrapper[4783]: I1002 11:19:47.886890 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/751e9a32-8be6-4bd8-877b-cbf6ea1d391b-openstack-edpm-ipam\") pod \"dnsmasq-dns-67b789f86c-5t2rp\" (UID: \"751e9a32-8be6-4bd8-877b-cbf6ea1d391b\") " pod="openstack/dnsmasq-dns-67b789f86c-5t2rp" Oct 02 11:19:47 crc kubenswrapper[4783]: I1002 11:19:47.887098 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/751e9a32-8be6-4bd8-877b-cbf6ea1d391b-dns-swift-storage-0\") pod \"dnsmasq-dns-67b789f86c-5t2rp\" (UID: \"751e9a32-8be6-4bd8-877b-cbf6ea1d391b\") " pod="openstack/dnsmasq-dns-67b789f86c-5t2rp" Oct 02 11:19:47 crc kubenswrapper[4783]: I1002 11:19:47.887145 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/751e9a32-8be6-4bd8-877b-cbf6ea1d391b-config\") pod \"dnsmasq-dns-67b789f86c-5t2rp\" (UID: \"751e9a32-8be6-4bd8-877b-cbf6ea1d391b\") " 
pod="openstack/dnsmasq-dns-67b789f86c-5t2rp" Oct 02 11:19:47 crc kubenswrapper[4783]: I1002 11:19:47.887244 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/751e9a32-8be6-4bd8-877b-cbf6ea1d391b-ovsdbserver-sb\") pod \"dnsmasq-dns-67b789f86c-5t2rp\" (UID: \"751e9a32-8be6-4bd8-877b-cbf6ea1d391b\") " pod="openstack/dnsmasq-dns-67b789f86c-5t2rp" Oct 02 11:19:47 crc kubenswrapper[4783]: I1002 11:19:47.887763 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/751e9a32-8be6-4bd8-877b-cbf6ea1d391b-dns-svc\") pod \"dnsmasq-dns-67b789f86c-5t2rp\" (UID: \"751e9a32-8be6-4bd8-877b-cbf6ea1d391b\") " pod="openstack/dnsmasq-dns-67b789f86c-5t2rp" Oct 02 11:19:47 crc kubenswrapper[4783]: I1002 11:19:47.887910 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/751e9a32-8be6-4bd8-877b-cbf6ea1d391b-ovsdbserver-nb\") pod \"dnsmasq-dns-67b789f86c-5t2rp\" (UID: \"751e9a32-8be6-4bd8-877b-cbf6ea1d391b\") " pod="openstack/dnsmasq-dns-67b789f86c-5t2rp" Oct 02 11:19:47 crc kubenswrapper[4783]: I1002 11:19:47.914979 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jbv8p\" (UniqueName: \"kubernetes.io/projected/751e9a32-8be6-4bd8-877b-cbf6ea1d391b-kube-api-access-jbv8p\") pod \"dnsmasq-dns-67b789f86c-5t2rp\" (UID: \"751e9a32-8be6-4bd8-877b-cbf6ea1d391b\") " pod="openstack/dnsmasq-dns-67b789f86c-5t2rp" Oct 02 11:19:47 crc kubenswrapper[4783]: I1002 11:19:47.980590 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-67b789f86c-5t2rp" Oct 02 11:19:48 crc kubenswrapper[4783]: I1002 11:19:48.271936 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6","Type":"ContainerStarted","Data":"d8cdb80fce4830975325d95391efffaaa628dba619c7b6096fb0893ea341808d"} Oct 02 11:19:48 crc kubenswrapper[4783]: I1002 11:19:48.425688 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-67b789f86c-5t2rp"] Oct 02 11:19:49 crc kubenswrapper[4783]: I1002 11:19:49.282243 4783 generic.go:334] "Generic (PLEG): container finished" podID="751e9a32-8be6-4bd8-877b-cbf6ea1d391b" containerID="00f1922c51077ce6e0109ffc114fcacd6c76eabf1493f85dd40a72229631ea53" exitCode=0 Oct 02 11:19:49 crc kubenswrapper[4783]: I1002 11:19:49.282301 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67b789f86c-5t2rp" event={"ID":"751e9a32-8be6-4bd8-877b-cbf6ea1d391b","Type":"ContainerDied","Data":"00f1922c51077ce6e0109ffc114fcacd6c76eabf1493f85dd40a72229631ea53"} Oct 02 11:19:49 crc kubenswrapper[4783]: I1002 11:19:49.282601 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67b789f86c-5t2rp" event={"ID":"751e9a32-8be6-4bd8-877b-cbf6ea1d391b","Type":"ContainerStarted","Data":"e4bc6a4614a7b71706c91eafbb253aec92ab2712d439556e50513fe0020a3205"} Oct 02 11:19:50 crc kubenswrapper[4783]: I1002 11:19:50.292350 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67b789f86c-5t2rp" event={"ID":"751e9a32-8be6-4bd8-877b-cbf6ea1d391b","Type":"ContainerStarted","Data":"d3416e48eb08ecc0afc7ce7b0697e3d09d587248c676794ce516275fc8fe1b85"} Oct 02 11:19:50 crc kubenswrapper[4783]: I1002 11:19:50.292728 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openstack/dnsmasq-dns-67b789f86c-5t2rp" Oct 02 11:19:50 crc kubenswrapper[4783]: I1002 11:19:50.312779 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-67b789f86c-5t2rp" podStartSLOduration=3.312757082 podStartE2EDuration="3.312757082s" podCreationTimestamp="2025-10-02 11:19:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:19:50.310058398 +0000 UTC m=+1623.626252659" watchObservedRunningTime="2025-10-02 11:19:50.312757082 +0000 UTC m=+1623.628951343" Oct 02 11:19:55 crc kubenswrapper[4783]: I1002 11:19:55.546527 4783 scope.go:117] "RemoveContainer" containerID="a4f4b50638a10c4b177b4afe187ab7d697078645bdbd553938b22e713769fb04" Oct 02 11:19:55 crc kubenswrapper[4783]: E1002 11:19:55.548122 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:19:57 crc kubenswrapper[4783]: I1002 11:19:57.982608 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-67b789f86c-5t2rp" Oct 02 11:19:58 crc kubenswrapper[4783]: I1002 11:19:58.049127 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-59cf4bdb65-z5qhk"] Oct 02 11:19:58 crc kubenswrapper[4783]: I1002 11:19:58.049356 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-59cf4bdb65-z5qhk" podUID="f9e4493b-d398-4e4a-9467-7c08f76c26cf" containerName="dnsmasq-dns" containerID="cri-o://2bb9f2859174c9d5c6e2b07cb3bd6cd227d137a5a447cec6aa381c5773922f06" gracePeriod=10 Oct 02 11:19:58 crc kubenswrapper[4783]: I1002 11:19:58.393910 4783 generic.go:334] "Generic (PLEG): container finished" podID="f9e4493b-d398-4e4a-9467-7c08f76c26cf" containerID="2bb9f2859174c9d5c6e2b07cb3bd6cd227d137a5a447cec6aa381c5773922f06" exitCode=0 Oct 02 11:19:58 crc kubenswrapper[4783]: I1002 11:19:58.394229 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59cf4bdb65-z5qhk" event={"ID":"f9e4493b-d398-4e4a-9467-7c08f76c26cf","Type":"ContainerDied","Data":"2bb9f2859174c9d5c6e2b07cb3bd6cd227d137a5a447cec6aa381c5773922f06"} Oct 02 11:19:58 crc kubenswrapper[4783]: I1002 11:19:58.408537 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6bc556cf6f-vwb79"] Oct 02 11:19:58 crc kubenswrapper[4783]: I1002 11:19:58.410120 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6bc556cf6f-vwb79"
Oct 02 11:19:58 crc kubenswrapper[4783]: I1002 11:19:58.452470 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bc556cf6f-vwb79"]
Oct 02 11:19:58 crc kubenswrapper[4783]: I1002 11:19:58.518579 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c3943271-c02d-402e-a2a4-be3a58a82302-ovsdbserver-sb\") pod \"dnsmasq-dns-6bc556cf6f-vwb79\" (UID: \"c3943271-c02d-402e-a2a4-be3a58a82302\") " pod="openstack/dnsmasq-dns-6bc556cf6f-vwb79"
Oct 02 11:19:58 crc kubenswrapper[4783]: I1002 11:19:58.518639 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c3943271-c02d-402e-a2a4-be3a58a82302-dns-swift-storage-0\") pod \"dnsmasq-dns-6bc556cf6f-vwb79\" (UID: \"c3943271-c02d-402e-a2a4-be3a58a82302\") " pod="openstack/dnsmasq-dns-6bc556cf6f-vwb79"
Oct 02 11:19:58 crc kubenswrapper[4783]: I1002 11:19:58.518666 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c3943271-c02d-402e-a2a4-be3a58a82302-dns-svc\") pod \"dnsmasq-dns-6bc556cf6f-vwb79\" (UID: \"c3943271-c02d-402e-a2a4-be3a58a82302\") " pod="openstack/dnsmasq-dns-6bc556cf6f-vwb79"
Oct 02 11:19:58 crc kubenswrapper[4783]: I1002 11:19:58.518724 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/c3943271-c02d-402e-a2a4-be3a58a82302-openstack-edpm-ipam\") pod \"dnsmasq-dns-6bc556cf6f-vwb79\" (UID: \"c3943271-c02d-402e-a2a4-be3a58a82302\") " pod="openstack/dnsmasq-dns-6bc556cf6f-vwb79"
Oct 02 11:19:58 crc kubenswrapper[4783]: I1002 11:19:58.518749 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c3943271-c02d-402e-a2a4-be3a58a82302-config\") pod \"dnsmasq-dns-6bc556cf6f-vwb79\" (UID: \"c3943271-c02d-402e-a2a4-be3a58a82302\") " pod="openstack/dnsmasq-dns-6bc556cf6f-vwb79"
Oct 02 11:19:58 crc kubenswrapper[4783]: I1002 11:19:58.518772 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-96lcg\" (UniqueName: \"kubernetes.io/projected/c3943271-c02d-402e-a2a4-be3a58a82302-kube-api-access-96lcg\") pod \"dnsmasq-dns-6bc556cf6f-vwb79\" (UID: \"c3943271-c02d-402e-a2a4-be3a58a82302\") " pod="openstack/dnsmasq-dns-6bc556cf6f-vwb79"
Oct 02 11:19:58 crc kubenswrapper[4783]: I1002 11:19:58.518806 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c3943271-c02d-402e-a2a4-be3a58a82302-ovsdbserver-nb\") pod \"dnsmasq-dns-6bc556cf6f-vwb79\" (UID: \"c3943271-c02d-402e-a2a4-be3a58a82302\") " pod="openstack/dnsmasq-dns-6bc556cf6f-vwb79"
Oct 02 11:19:58 crc kubenswrapper[4783]: I1002 11:19:58.619941 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c3943271-c02d-402e-a2a4-be3a58a82302-ovsdbserver-nb\") pod \"dnsmasq-dns-6bc556cf6f-vwb79\" (UID: \"c3943271-c02d-402e-a2a4-be3a58a82302\") " pod="openstack/dnsmasq-dns-6bc556cf6f-vwb79"
Oct 02 11:19:58 crc kubenswrapper[4783]: I1002 11:19:58.621395 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c3943271-c02d-402e-a2a4-be3a58a82302-ovsdbserver-sb\") pod \"dnsmasq-dns-6bc556cf6f-vwb79\" (UID: \"c3943271-c02d-402e-a2a4-be3a58a82302\") " pod="openstack/dnsmasq-dns-6bc556cf6f-vwb79"
Oct 02 11:19:58 crc kubenswrapper[4783]: I1002 11:19:58.622865 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c3943271-c02d-402e-a2a4-be3a58a82302-dns-swift-storage-0\") pod \"dnsmasq-dns-6bc556cf6f-vwb79\" (UID: \"c3943271-c02d-402e-a2a4-be3a58a82302\") " pod="openstack/dnsmasq-dns-6bc556cf6f-vwb79"
Oct 02 11:19:58 crc kubenswrapper[4783]: I1002 11:19:58.624321 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c3943271-c02d-402e-a2a4-be3a58a82302-dns-svc\") pod \"dnsmasq-dns-6bc556cf6f-vwb79\" (UID: \"c3943271-c02d-402e-a2a4-be3a58a82302\") " pod="openstack/dnsmasq-dns-6bc556cf6f-vwb79"
Oct 02 11:19:58 crc kubenswrapper[4783]: I1002 11:19:58.625880 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/c3943271-c02d-402e-a2a4-be3a58a82302-openstack-edpm-ipam\") pod \"dnsmasq-dns-6bc556cf6f-vwb79\" (UID: \"c3943271-c02d-402e-a2a4-be3a58a82302\") " pod="openstack/dnsmasq-dns-6bc556cf6f-vwb79"
Oct 02 11:19:58 crc kubenswrapper[4783]: I1002 11:19:58.626790 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c3943271-c02d-402e-a2a4-be3a58a82302-config\") pod \"dnsmasq-dns-6bc556cf6f-vwb79\" (UID: \"c3943271-c02d-402e-a2a4-be3a58a82302\") " pod="openstack/dnsmasq-dns-6bc556cf6f-vwb79"
Oct 02 11:19:58 crc kubenswrapper[4783]: I1002 11:19:58.625727 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/c3943271-c02d-402e-a2a4-be3a58a82302-dns-svc\") pod \"dnsmasq-dns-6bc556cf6f-vwb79\" (UID: \"c3943271-c02d-402e-a2a4-be3a58a82302\") " pod="openstack/dnsmasq-dns-6bc556cf6f-vwb79"
Oct 02 11:19:58 crc kubenswrapper[4783]: I1002 11:19:58.622453 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/c3943271-c02d-402e-a2a4-be3a58a82302-ovsdbserver-sb\") pod \"dnsmasq-dns-6bc556cf6f-vwb79\" (UID: \"c3943271-c02d-402e-a2a4-be3a58a82302\") " pod="openstack/dnsmasq-dns-6bc556cf6f-vwb79"
Oct 02 11:19:58 crc kubenswrapper[4783]: I1002 11:19:58.626478 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/c3943271-c02d-402e-a2a4-be3a58a82302-openstack-edpm-ipam\") pod \"dnsmasq-dns-6bc556cf6f-vwb79\" (UID: \"c3943271-c02d-402e-a2a4-be3a58a82302\") " pod="openstack/dnsmasq-dns-6bc556cf6f-vwb79"
Oct 02 11:19:58 crc kubenswrapper[4783]: I1002 11:19:58.621240 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/c3943271-c02d-402e-a2a4-be3a58a82302-ovsdbserver-nb\") pod \"dnsmasq-dns-6bc556cf6f-vwb79\" (UID: \"c3943271-c02d-402e-a2a4-be3a58a82302\") " pod="openstack/dnsmasq-dns-6bc556cf6f-vwb79"
Oct 02 11:19:58 crc kubenswrapper[4783]: I1002 11:19:58.624073 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/c3943271-c02d-402e-a2a4-be3a58a82302-dns-swift-storage-0\") pod \"dnsmasq-dns-6bc556cf6f-vwb79\" (UID: \"c3943271-c02d-402e-a2a4-be3a58a82302\") " pod="openstack/dnsmasq-dns-6bc556cf6f-vwb79"
Oct 02 11:19:58 crc kubenswrapper[4783]: I1002 11:19:58.627478 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c3943271-c02d-402e-a2a4-be3a58a82302-config\") pod \"dnsmasq-dns-6bc556cf6f-vwb79\" (UID: \"c3943271-c02d-402e-a2a4-be3a58a82302\") " pod="openstack/dnsmasq-dns-6bc556cf6f-vwb79"
Oct 02 11:19:58 crc kubenswrapper[4783]: I1002 11:19:58.628631 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-96lcg\" (UniqueName: \"kubernetes.io/projected/c3943271-c02d-402e-a2a4-be3a58a82302-kube-api-access-96lcg\") pod \"dnsmasq-dns-6bc556cf6f-vwb79\" (UID: \"c3943271-c02d-402e-a2a4-be3a58a82302\") " pod="openstack/dnsmasq-dns-6bc556cf6f-vwb79"
Oct 02 11:19:58 crc kubenswrapper[4783]: I1002 11:19:58.653228 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-96lcg\" (UniqueName: \"kubernetes.io/projected/c3943271-c02d-402e-a2a4-be3a58a82302-kube-api-access-96lcg\") pod \"dnsmasq-dns-6bc556cf6f-vwb79\" (UID: \"c3943271-c02d-402e-a2a4-be3a58a82302\") " pod="openstack/dnsmasq-dns-6bc556cf6f-vwb79"
Oct 02 11:19:58 crc kubenswrapper[4783]: I1002 11:19:58.738013 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6bc556cf6f-vwb79"
Oct 02 11:19:58 crc kubenswrapper[4783]: I1002 11:19:58.872058 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-59cf4bdb65-z5qhk"
Oct 02 11:19:58 crc kubenswrapper[4783]: I1002 11:19:58.933459 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f9e4493b-d398-4e4a-9467-7c08f76c26cf-config\") pod \"f9e4493b-d398-4e4a-9467-7c08f76c26cf\" (UID: \"f9e4493b-d398-4e4a-9467-7c08f76c26cf\") "
Oct 02 11:19:58 crc kubenswrapper[4783]: I1002 11:19:58.933573 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f9e4493b-d398-4e4a-9467-7c08f76c26cf-dns-svc\") pod \"f9e4493b-d398-4e4a-9467-7c08f76c26cf\" (UID: \"f9e4493b-d398-4e4a-9467-7c08f76c26cf\") "
Oct 02 11:19:58 crc kubenswrapper[4783]: I1002 11:19:58.933621 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f9e4493b-d398-4e4a-9467-7c08f76c26cf-ovsdbserver-nb\") pod \"f9e4493b-d398-4e4a-9467-7c08f76c26cf\" (UID: \"f9e4493b-d398-4e4a-9467-7c08f76c26cf\") "
Oct 02 11:19:58 crc kubenswrapper[4783]: I1002 11:19:58.933671 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f9e4493b-d398-4e4a-9467-7c08f76c26cf-dns-swift-storage-0\") pod \"f9e4493b-d398-4e4a-9467-7c08f76c26cf\" (UID: \"f9e4493b-d398-4e4a-9467-7c08f76c26cf\") "
Oct 02 11:19:58 crc kubenswrapper[4783]: I1002 11:19:58.933692 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lf57k\" (UniqueName: \"kubernetes.io/projected/f9e4493b-d398-4e4a-9467-7c08f76c26cf-kube-api-access-lf57k\") pod \"f9e4493b-d398-4e4a-9467-7c08f76c26cf\" (UID: \"f9e4493b-d398-4e4a-9467-7c08f76c26cf\") "
Oct 02 11:19:58 crc kubenswrapper[4783]: I1002 11:19:58.933746 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f9e4493b-d398-4e4a-9467-7c08f76c26cf-ovsdbserver-sb\") pod \"f9e4493b-d398-4e4a-9467-7c08f76c26cf\" (UID: \"f9e4493b-d398-4e4a-9467-7c08f76c26cf\") "
Oct 02 11:19:58 crc kubenswrapper[4783]: I1002 11:19:58.945201 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f9e4493b-d398-4e4a-9467-7c08f76c26cf-kube-api-access-lf57k" (OuterVolumeSpecName: "kube-api-access-lf57k") pod "f9e4493b-d398-4e4a-9467-7c08f76c26cf" (UID: "f9e4493b-d398-4e4a-9467-7c08f76c26cf"). InnerVolumeSpecName "kube-api-access-lf57k". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 02 11:19:59 crc kubenswrapper[4783]: I1002 11:19:59.035895 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lf57k\" (UniqueName: \"kubernetes.io/projected/f9e4493b-d398-4e4a-9467-7c08f76c26cf-kube-api-access-lf57k\") on node \"crc\" DevicePath \"\""
Oct 02 11:19:59 crc kubenswrapper[4783]: I1002 11:19:59.041115 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f9e4493b-d398-4e4a-9467-7c08f76c26cf-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "f9e4493b-d398-4e4a-9467-7c08f76c26cf" (UID: "f9e4493b-d398-4e4a-9467-7c08f76c26cf"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 02 11:19:59 crc kubenswrapper[4783]: I1002 11:19:59.043659 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6bc556cf6f-vwb79"]
Oct 02 11:19:59 crc kubenswrapper[4783]: I1002 11:19:59.051254 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f9e4493b-d398-4e4a-9467-7c08f76c26cf-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "f9e4493b-d398-4e4a-9467-7c08f76c26cf" (UID: "f9e4493b-d398-4e4a-9467-7c08f76c26cf"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 02 11:19:59 crc kubenswrapper[4783]: I1002 11:19:59.060174 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f9e4493b-d398-4e4a-9467-7c08f76c26cf-config" (OuterVolumeSpecName: "config") pod "f9e4493b-d398-4e4a-9467-7c08f76c26cf" (UID: "f9e4493b-d398-4e4a-9467-7c08f76c26cf"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 02 11:19:59 crc kubenswrapper[4783]: I1002 11:19:59.069181 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f9e4493b-d398-4e4a-9467-7c08f76c26cf-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "f9e4493b-d398-4e4a-9467-7c08f76c26cf" (UID: "f9e4493b-d398-4e4a-9467-7c08f76c26cf"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 02 11:19:59 crc kubenswrapper[4783]: I1002 11:19:59.088817 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f9e4493b-d398-4e4a-9467-7c08f76c26cf-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "f9e4493b-d398-4e4a-9467-7c08f76c26cf" (UID: "f9e4493b-d398-4e4a-9467-7c08f76c26cf"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 02 11:19:59 crc kubenswrapper[4783]: I1002 11:19:59.138355 4783 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f9e4493b-d398-4e4a-9467-7c08f76c26cf-dns-svc\") on node \"crc\" DevicePath \"\""
Oct 02 11:19:59 crc kubenswrapper[4783]: I1002 11:19:59.138400 4783 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f9e4493b-d398-4e4a-9467-7c08f76c26cf-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Oct 02 11:19:59 crc kubenswrapper[4783]: I1002 11:19:59.138432 4783 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/f9e4493b-d398-4e4a-9467-7c08f76c26cf-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Oct 02 11:19:59 crc kubenswrapper[4783]: I1002 11:19:59.138445 4783 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/f9e4493b-d398-4e4a-9467-7c08f76c26cf-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Oct 02 11:19:59 crc kubenswrapper[4783]: I1002 11:19:59.138457 4783 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f9e4493b-d398-4e4a-9467-7c08f76c26cf-config\") on node \"crc\" DevicePath \"\""
Oct 02 11:19:59 crc kubenswrapper[4783]: I1002 11:19:59.403450 4783 generic.go:334] "Generic (PLEG): container finished" podID="c3943271-c02d-402e-a2a4-be3a58a82302" containerID="86dffa60c7c902649d5466582b379cd9b630141fc25c322c532286574530682c" exitCode=0
Oct 02 11:19:59 crc kubenswrapper[4783]: I1002 11:19:59.403502 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc556cf6f-vwb79" event={"ID":"c3943271-c02d-402e-a2a4-be3a58a82302","Type":"ContainerDied","Data":"86dffa60c7c902649d5466582b379cd9b630141fc25c322c532286574530682c"}
Oct 02 11:19:59 crc kubenswrapper[4783]: I1002 11:19:59.403546 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc556cf6f-vwb79" event={"ID":"c3943271-c02d-402e-a2a4-be3a58a82302","Type":"ContainerStarted","Data":"d11d48fe3cb3e26a0eb3e839b7b6b5d518b5d6f888f9890d7a3a7bf384f06681"}
Oct 02 11:19:59 crc kubenswrapper[4783]: I1002 11:19:59.406585 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-59cf4bdb65-z5qhk" event={"ID":"f9e4493b-d398-4e4a-9467-7c08f76c26cf","Type":"ContainerDied","Data":"540b42c5f0ace69aa9cafc471d6ebb5a3dd934f1293d516155644a51c20cbaa1"}
Oct 02 11:19:59 crc kubenswrapper[4783]: I1002 11:19:59.406638 4783 scope.go:117] "RemoveContainer" containerID="2bb9f2859174c9d5c6e2b07cb3bd6cd227d137a5a447cec6aa381c5773922f06"
Oct 02 11:19:59 crc kubenswrapper[4783]: I1002 11:19:59.406861 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-59cf4bdb65-z5qhk"
Oct 02 11:19:59 crc kubenswrapper[4783]: I1002 11:19:59.439035 4783 scope.go:117] "RemoveContainer" containerID="5fb306823d6bed24927f52ec15273f818e03d796730c9f6931f75ba1922e9bc7"
Oct 02 11:19:59 crc kubenswrapper[4783]: I1002 11:19:59.653714 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-59cf4bdb65-z5qhk"]
Oct 02 11:19:59 crc kubenswrapper[4783]: I1002 11:19:59.663143 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-59cf4bdb65-z5qhk"]
Oct 02 11:20:00 crc kubenswrapper[4783]: I1002 11:20:00.422378 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6bc556cf6f-vwb79" event={"ID":"c3943271-c02d-402e-a2a4-be3a58a82302","Type":"ContainerStarted","Data":"8f0a829f5193df2f74731e6a505d731bcede9bb5149db34e6447755d6f61e47c"}
Oct 02 11:20:00 crc kubenswrapper[4783]: I1002 11:20:00.422557 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6bc556cf6f-vwb79"
Oct 02 11:20:00 crc kubenswrapper[4783]: I1002 11:20:00.454065 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6bc556cf6f-vwb79" podStartSLOduration=2.454047599 podStartE2EDuration="2.454047599s" podCreationTimestamp="2025-10-02 11:19:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:20:00.448901879 +0000 UTC m=+1633.765096140" watchObservedRunningTime="2025-10-02 11:20:00.454047599 +0000 UTC m=+1633.770241860"
Oct 02 11:20:01 crc kubenswrapper[4783]: I1002 11:20:01.561253 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f9e4493b-d398-4e4a-9467-7c08f76c26cf" path="/var/lib/kubelet/pods/f9e4493b-d398-4e4a-9467-7c08f76c26cf/volumes"
Oct 02 11:20:07 crc kubenswrapper[4783]: I1002 11:20:07.557461 4783 scope.go:117] "RemoveContainer" containerID="a4f4b50638a10c4b177b4afe187ab7d697078645bdbd553938b22e713769fb04"
Oct 02 11:20:07 crc kubenswrapper[4783]: E1002 11:20:07.559522 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 11:20:08 crc kubenswrapper[4783]: I1002 11:20:08.739601 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6bc556cf6f-vwb79"
Oct 02 11:20:08 crc kubenswrapper[4783]: I1002 11:20:08.821019 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-67b789f86c-5t2rp"]
Oct 02 11:20:08 crc kubenswrapper[4783]: I1002 11:20:08.821470 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-67b789f86c-5t2rp" podUID="751e9a32-8be6-4bd8-877b-cbf6ea1d391b" containerName="dnsmasq-dns" containerID="cri-o://d3416e48eb08ecc0afc7ce7b0697e3d09d587248c676794ce516275fc8fe1b85" gracePeriod=10
Oct 02 11:20:09 crc kubenswrapper[4783]: I1002 11:20:09.274582 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-67b789f86c-5t2rp"
Oct 02 11:20:09 crc kubenswrapper[4783]: I1002 11:20:09.341832 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/751e9a32-8be6-4bd8-877b-cbf6ea1d391b-dns-svc\") pod \"751e9a32-8be6-4bd8-877b-cbf6ea1d391b\" (UID: \"751e9a32-8be6-4bd8-877b-cbf6ea1d391b\") "
Oct 02 11:20:09 crc kubenswrapper[4783]: I1002 11:20:09.341980 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/751e9a32-8be6-4bd8-877b-cbf6ea1d391b-config\") pod \"751e9a32-8be6-4bd8-877b-cbf6ea1d391b\" (UID: \"751e9a32-8be6-4bd8-877b-cbf6ea1d391b\") "
Oct 02 11:20:09 crc kubenswrapper[4783]: I1002 11:20:09.342054 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jbv8p\" (UniqueName: \"kubernetes.io/projected/751e9a32-8be6-4bd8-877b-cbf6ea1d391b-kube-api-access-jbv8p\") pod \"751e9a32-8be6-4bd8-877b-cbf6ea1d391b\" (UID: \"751e9a32-8be6-4bd8-877b-cbf6ea1d391b\") "
Oct 02 11:20:09 crc kubenswrapper[4783]: I1002 11:20:09.342081 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/751e9a32-8be6-4bd8-877b-cbf6ea1d391b-dns-swift-storage-0\") pod \"751e9a32-8be6-4bd8-877b-cbf6ea1d391b\" (UID: \"751e9a32-8be6-4bd8-877b-cbf6ea1d391b\") "
Oct 02 11:20:09 crc kubenswrapper[4783]: I1002 11:20:09.342121 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/751e9a32-8be6-4bd8-877b-cbf6ea1d391b-ovsdbserver-nb\") pod \"751e9a32-8be6-4bd8-877b-cbf6ea1d391b\" (UID: \"751e9a32-8be6-4bd8-877b-cbf6ea1d391b\") "
Oct 02 11:20:09 crc kubenswrapper[4783]: I1002 11:20:09.342195 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/751e9a32-8be6-4bd8-877b-cbf6ea1d391b-openstack-edpm-ipam\") pod \"751e9a32-8be6-4bd8-877b-cbf6ea1d391b\" (UID: \"751e9a32-8be6-4bd8-877b-cbf6ea1d391b\") "
Oct 02 11:20:09 crc kubenswrapper[4783]: I1002 11:20:09.342219 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/751e9a32-8be6-4bd8-877b-cbf6ea1d391b-ovsdbserver-sb\") pod \"751e9a32-8be6-4bd8-877b-cbf6ea1d391b\" (UID: \"751e9a32-8be6-4bd8-877b-cbf6ea1d391b\") "
Oct 02 11:20:09 crc kubenswrapper[4783]: I1002 11:20:09.350653 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/751e9a32-8be6-4bd8-877b-cbf6ea1d391b-kube-api-access-jbv8p" (OuterVolumeSpecName: "kube-api-access-jbv8p") pod "751e9a32-8be6-4bd8-877b-cbf6ea1d391b" (UID: "751e9a32-8be6-4bd8-877b-cbf6ea1d391b"). InnerVolumeSpecName "kube-api-access-jbv8p". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 02 11:20:09 crc kubenswrapper[4783]: I1002 11:20:09.423493 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/751e9a32-8be6-4bd8-877b-cbf6ea1d391b-config" (OuterVolumeSpecName: "config") pod "751e9a32-8be6-4bd8-877b-cbf6ea1d391b" (UID: "751e9a32-8be6-4bd8-877b-cbf6ea1d391b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 02 11:20:09 crc kubenswrapper[4783]: I1002 11:20:09.430489 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/751e9a32-8be6-4bd8-877b-cbf6ea1d391b-openstack-edpm-ipam" (OuterVolumeSpecName: "openstack-edpm-ipam") pod "751e9a32-8be6-4bd8-877b-cbf6ea1d391b" (UID: "751e9a32-8be6-4bd8-877b-cbf6ea1d391b"). InnerVolumeSpecName "openstack-edpm-ipam". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 02 11:20:09 crc kubenswrapper[4783]: I1002 11:20:09.439612 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/751e9a32-8be6-4bd8-877b-cbf6ea1d391b-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "751e9a32-8be6-4bd8-877b-cbf6ea1d391b" (UID: "751e9a32-8be6-4bd8-877b-cbf6ea1d391b"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 02 11:20:09 crc kubenswrapper[4783]: I1002 11:20:09.440490 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/751e9a32-8be6-4bd8-877b-cbf6ea1d391b-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "751e9a32-8be6-4bd8-877b-cbf6ea1d391b" (UID: "751e9a32-8be6-4bd8-877b-cbf6ea1d391b"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 02 11:20:09 crc kubenswrapper[4783]: I1002 11:20:09.444983 4783 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/751e9a32-8be6-4bd8-877b-cbf6ea1d391b-config\") on node \"crc\" DevicePath \"\""
Oct 02 11:20:09 crc kubenswrapper[4783]: I1002 11:20:09.445168 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jbv8p\" (UniqueName: \"kubernetes.io/projected/751e9a32-8be6-4bd8-877b-cbf6ea1d391b-kube-api-access-jbv8p\") on node \"crc\" DevicePath \"\""
Oct 02 11:20:09 crc kubenswrapper[4783]: I1002 11:20:09.445264 4783 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/751e9a32-8be6-4bd8-877b-cbf6ea1d391b-dns-swift-storage-0\") on node \"crc\" DevicePath \"\""
Oct 02 11:20:09 crc kubenswrapper[4783]: I1002 11:20:09.445332 4783 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/751e9a32-8be6-4bd8-877b-cbf6ea1d391b-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Oct 02 11:20:09 crc kubenswrapper[4783]: I1002 11:20:09.445496 4783 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/configmap/751e9a32-8be6-4bd8-877b-cbf6ea1d391b-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\""
Oct 02 11:20:09 crc kubenswrapper[4783]: I1002 11:20:09.450930 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/751e9a32-8be6-4bd8-877b-cbf6ea1d391b-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "751e9a32-8be6-4bd8-877b-cbf6ea1d391b" (UID: "751e9a32-8be6-4bd8-877b-cbf6ea1d391b"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 02 11:20:09 crc kubenswrapper[4783]: I1002 11:20:09.451290 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/751e9a32-8be6-4bd8-877b-cbf6ea1d391b-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "751e9a32-8be6-4bd8-877b-cbf6ea1d391b" (UID: "751e9a32-8be6-4bd8-877b-cbf6ea1d391b"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 02 11:20:09 crc kubenswrapper[4783]: I1002 11:20:09.511642 4783 generic.go:334] "Generic (PLEG): container finished" podID="751e9a32-8be6-4bd8-877b-cbf6ea1d391b" containerID="d3416e48eb08ecc0afc7ce7b0697e3d09d587248c676794ce516275fc8fe1b85" exitCode=0
Oct 02 11:20:09 crc kubenswrapper[4783]: I1002 11:20:09.511703 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-67b789f86c-5t2rp"
Oct 02 11:20:09 crc kubenswrapper[4783]: I1002 11:20:09.511703 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67b789f86c-5t2rp" event={"ID":"751e9a32-8be6-4bd8-877b-cbf6ea1d391b","Type":"ContainerDied","Data":"d3416e48eb08ecc0afc7ce7b0697e3d09d587248c676794ce516275fc8fe1b85"}
Oct 02 11:20:09 crc kubenswrapper[4783]: I1002 11:20:09.511763 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67b789f86c-5t2rp" event={"ID":"751e9a32-8be6-4bd8-877b-cbf6ea1d391b","Type":"ContainerDied","Data":"e4bc6a4614a7b71706c91eafbb253aec92ab2712d439556e50513fe0020a3205"}
Oct 02 11:20:09 crc kubenswrapper[4783]: I1002 11:20:09.511785 4783 scope.go:117] "RemoveContainer" containerID="d3416e48eb08ecc0afc7ce7b0697e3d09d587248c676794ce516275fc8fe1b85"
Oct 02 11:20:09 crc kubenswrapper[4783]: I1002 11:20:09.532642 4783 scope.go:117] "RemoveContainer" containerID="00f1922c51077ce6e0109ffc114fcacd6c76eabf1493f85dd40a72229631ea53"
Oct 02 11:20:09 crc kubenswrapper[4783]: I1002 11:20:09.544530 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-67b789f86c-5t2rp"]
Oct 02 11:20:09 crc kubenswrapper[4783]: I1002 11:20:09.547723 4783 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/751e9a32-8be6-4bd8-877b-cbf6ea1d391b-dns-svc\") on node \"crc\" DevicePath \"\""
Oct 02 11:20:09 crc kubenswrapper[4783]: I1002 11:20:09.547878 4783 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/751e9a32-8be6-4bd8-877b-cbf6ea1d391b-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Oct 02 11:20:09 crc kubenswrapper[4783]: I1002 11:20:09.566090 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-67b789f86c-5t2rp"]
Oct 02 11:20:09 crc kubenswrapper[4783]: I1002 11:20:09.575089 4783 scope.go:117] "RemoveContainer" containerID="d3416e48eb08ecc0afc7ce7b0697e3d09d587248c676794ce516275fc8fe1b85"
Oct 02 11:20:09 crc kubenswrapper[4783]: E1002 11:20:09.575768 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d3416e48eb08ecc0afc7ce7b0697e3d09d587248c676794ce516275fc8fe1b85\": container with ID starting with d3416e48eb08ecc0afc7ce7b0697e3d09d587248c676794ce516275fc8fe1b85 not found: ID does not exist" containerID="d3416e48eb08ecc0afc7ce7b0697e3d09d587248c676794ce516275fc8fe1b85"
Oct 02 11:20:09 crc kubenswrapper[4783]: I1002 11:20:09.575806 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d3416e48eb08ecc0afc7ce7b0697e3d09d587248c676794ce516275fc8fe1b85"} err="failed to get container status \"d3416e48eb08ecc0afc7ce7b0697e3d09d587248c676794ce516275fc8fe1b85\": rpc error: code = NotFound desc = could not find container \"d3416e48eb08ecc0afc7ce7b0697e3d09d587248c676794ce516275fc8fe1b85\": container with ID starting with d3416e48eb08ecc0afc7ce7b0697e3d09d587248c676794ce516275fc8fe1b85 not found: ID does not exist"
Oct 02 11:20:09 crc kubenswrapper[4783]: I1002 11:20:09.575833 4783 scope.go:117] "RemoveContainer" containerID="00f1922c51077ce6e0109ffc114fcacd6c76eabf1493f85dd40a72229631ea53"
Oct 02 11:20:09 crc kubenswrapper[4783]: E1002 11:20:09.576313 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"00f1922c51077ce6e0109ffc114fcacd6c76eabf1493f85dd40a72229631ea53\": container with ID starting with 00f1922c51077ce6e0109ffc114fcacd6c76eabf1493f85dd40a72229631ea53 not found: ID does not exist" containerID="00f1922c51077ce6e0109ffc114fcacd6c76eabf1493f85dd40a72229631ea53"
Oct 02 11:20:09 crc kubenswrapper[4783]: I1002 11:20:09.576343 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"00f1922c51077ce6e0109ffc114fcacd6c76eabf1493f85dd40a72229631ea53"} err="failed to get container status \"00f1922c51077ce6e0109ffc114fcacd6c76eabf1493f85dd40a72229631ea53\": rpc error: code = NotFound desc = could not find container \"00f1922c51077ce6e0109ffc114fcacd6c76eabf1493f85dd40a72229631ea53\": container with ID starting with 00f1922c51077ce6e0109ffc114fcacd6c76eabf1493f85dd40a72229631ea53 not found: ID does not exist"
Oct 02 11:20:11 crc kubenswrapper[4783]: I1002 11:20:11.559483 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="751e9a32-8be6-4bd8-877b-cbf6ea1d391b" path="/var/lib/kubelet/pods/751e9a32-8be6-4bd8-877b-cbf6ea1d391b/volumes"
Oct 02 11:20:19 crc kubenswrapper[4783]: I1002 11:20:19.544430 4783 scope.go:117] "RemoveContainer" containerID="a4f4b50638a10c4b177b4afe187ab7d697078645bdbd553938b22e713769fb04"
Oct 02 11:20:19 crc kubenswrapper[4783]: E1002 11:20:19.545105 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 11:20:19 crc kubenswrapper[4783]: I1002 11:20:19.605273 4783 generic.go:334] "Generic (PLEG): container finished" podID="360fdfae-913f-4be9-985b-26101d3dfb3b" containerID="25d5866b14ca7e7857c2a7f56b911d9cade1a65bf700dcf7f09ee16764d8dad4" exitCode=0
Oct 02 11:20:19 crc kubenswrapper[4783]: I1002 11:20:19.605314 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"360fdfae-913f-4be9-985b-26101d3dfb3b","Type":"ContainerDied","Data":"25d5866b14ca7e7857c2a7f56b911d9cade1a65bf700dcf7f09ee16764d8dad4"}
Oct 02 11:20:20 crc kubenswrapper[4783]: I1002 11:20:20.616143 4783 generic.go:334] "Generic (PLEG): container finished" podID="7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6" containerID="d8cdb80fce4830975325d95391efffaaa628dba619c7b6096fb0893ea341808d" exitCode=0
Oct 02 11:20:20 crc kubenswrapper[4783]: I1002 11:20:20.616599 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6","Type":"ContainerDied","Data":"d8cdb80fce4830975325d95391efffaaa628dba619c7b6096fb0893ea341808d"}
Oct 02 11:20:20 crc kubenswrapper[4783]: I1002 11:20:20.621200 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"360fdfae-913f-4be9-985b-26101d3dfb3b","Type":"ContainerStarted","Data":"011e9ce63b1b2bb5092b3eddfdf1f5b0ff48fb967540188abed41132d9b2e12b"}
Oct 02 11:20:20 crc kubenswrapper[4783]: I1002 11:20:20.621499 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0"
Oct 02 11:20:20 crc kubenswrapper[4783]: I1002 11:20:20.692966 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=36.69289114 podStartE2EDuration="36.69289114s" podCreationTimestamp="2025-10-02 11:19:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:20:20.680260785 +0000 UTC m=+1653.996455056" watchObservedRunningTime="2025-10-02 11:20:20.69289114 +0000 UTC m=+1654.009085411"
Oct 02 11:20:21 crc kubenswrapper[4783]: I1002 11:20:21.631477 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6","Type":"ContainerStarted","Data":"9a5c3a94f76ed9338bc05ab1db85c6e763b708881fb9c2224e541d878496c780"}
Oct 02 11:20:21 crc kubenswrapper[4783]: I1002 11:20:21.632080 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0"
Oct 02 11:20:21 crc kubenswrapper[4783]: I1002 11:20:21.667402 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=36.667375986 podStartE2EDuration="36.667375986s" podCreationTimestamp="2025-10-02 11:19:45 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 11:20:21.658511163 +0000 UTC m=+1654.974705424" watchObservedRunningTime="2025-10-02 11:20:21.667375986 +0000 UTC m=+1654.983570247"
Oct 02 11:20:31 crc kubenswrapper[4783]: I1002 11:20:31.940800 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ph6fg"]
Oct 02 11:20:31 crc kubenswrapper[4783]: E1002 11:20:31.941836 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9e4493b-d398-4e4a-9467-7c08f76c26cf" containerName="dnsmasq-dns"
Oct 02 11:20:31 crc kubenswrapper[4783]: I1002 11:20:31.941850 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9e4493b-d398-4e4a-9467-7c08f76c26cf" containerName="dnsmasq-dns"
Oct 02 11:20:31 crc kubenswrapper[4783]: E1002 11:20:31.941866 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9e4493b-d398-4e4a-9467-7c08f76c26cf" containerName="init"
Oct 02 11:20:31 crc kubenswrapper[4783]: I1002 11:20:31.941873 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9e4493b-d398-4e4a-9467-7c08f76c26cf" containerName="init"
Oct 02 11:20:31 crc kubenswrapper[4783]: E1002 11:20:31.941905 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="751e9a32-8be6-4bd8-877b-cbf6ea1d391b" containerName="init"
Oct 02 11:20:31 crc kubenswrapper[4783]: I1002 11:20:31.941913 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="751e9a32-8be6-4bd8-877b-cbf6ea1d391b" containerName="init"
Oct 02 11:20:31 crc kubenswrapper[4783]: E1002 11:20:31.941926 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="751e9a32-8be6-4bd8-877b-cbf6ea1d391b" containerName="dnsmasq-dns"
Oct 02 11:20:31 crc kubenswrapper[4783]: I1002 11:20:31.941933 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="751e9a32-8be6-4bd8-877b-cbf6ea1d391b" containerName="dnsmasq-dns"
Oct 02 11:20:31 crc kubenswrapper[4783]: I1002 11:20:31.942119 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="751e9a32-8be6-4bd8-877b-cbf6ea1d391b" containerName="dnsmasq-dns"
Oct 02 11:20:31 crc kubenswrapper[4783]: I1002 11:20:31.942144 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="f9e4493b-d398-4e4a-9467-7c08f76c26cf" containerName="dnsmasq-dns"
Oct 02 11:20:31 crc kubenswrapper[4783]: I1002 11:20:31.942844 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ph6fg"
Oct 02 11:20:31 crc kubenswrapper[4783]: I1002 11:20:31.945503 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-n5lmz"
Oct 02 11:20:31 crc kubenswrapper[4783]: I1002 11:20:31.945500 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Oct 02 11:20:31 crc kubenswrapper[4783]: I1002 11:20:31.948260 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Oct 02 11:20:31 crc kubenswrapper[4783]: I1002 11:20:31.948803 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Oct 02 11:20:31 crc kubenswrapper[4783]: I1002 11:20:31.971934 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0880686c-c2ed-4b67-80e8-61a1d9aaa0de-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-ph6fg\" (UID: \"0880686c-c2ed-4b67-80e8-61a1d9aaa0de\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ph6fg"
Oct 02 11:20:31 crc kubenswrapper[4783]: I1002 11:20:31.972339 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-88sw6\" (UniqueName: \"kubernetes.io/projected/0880686c-c2ed-4b67-80e8-61a1d9aaa0de-kube-api-access-88sw6\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-ph6fg\" (UID: \"0880686c-c2ed-4b67-80e8-61a1d9aaa0de\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ph6fg"
Oct 02 11:20:31 crc kubenswrapper[4783]: I1002 11:20:31.972540 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0880686c-c2ed-4b67-80e8-61a1d9aaa0de-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-ph6fg\" (UID: \"0880686c-c2ed-4b67-80e8-61a1d9aaa0de\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ph6fg"
Oct 02 11:20:31 crc kubenswrapper[4783]: I1002 11:20:31.972638 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0880686c-c2ed-4b67-80e8-61a1d9aaa0de-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-ph6fg\" (UID: \"0880686c-c2ed-4b67-80e8-61a1d9aaa0de\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ph6fg"
Oct 02 11:20:32 crc kubenswrapper[4783]: I1002 11:20:32.002517 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ph6fg"]
Oct 02 11:20:32 crc kubenswrapper[4783]: I1002 11:20:32.074307 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-88sw6\" (UniqueName: \"kubernetes.io/projected/0880686c-c2ed-4b67-80e8-61a1d9aaa0de-kube-api-access-88sw6\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-ph6fg\" (UID: \"0880686c-c2ed-4b67-80e8-61a1d9aaa0de\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ph6fg"
Oct 02 11:20:32 crc kubenswrapper[4783]: I1002 11:20:32.074426 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0880686c-c2ed-4b67-80e8-61a1d9aaa0de-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-ph6fg\" (UID: \"0880686c-c2ed-4b67-80e8-61a1d9aaa0de\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ph6fg"
Oct 02 11:20:32 crc kubenswrapper[4783]: I1002 11:20:32.074481 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0880686c-c2ed-4b67-80e8-61a1d9aaa0de-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-ph6fg\" (UID: \"0880686c-c2ed-4b67-80e8-61a1d9aaa0de\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ph6fg"
Oct 02 11:20:32 crc kubenswrapper[4783]: I1002 11:20:32.074525 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0880686c-c2ed-4b67-80e8-61a1d9aaa0de-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-ph6fg\" (UID: \"0880686c-c2ed-4b67-80e8-61a1d9aaa0de\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ph6fg"
Oct 02 11:20:32 crc kubenswrapper[4783]: I1002 11:20:32.095836 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0880686c-c2ed-4b67-80e8-61a1d9aaa0de-inventory\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-ph6fg\" (UID: \"0880686c-c2ed-4b67-80e8-61a1d9aaa0de\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ph6fg"
Oct 02 11:20:32 crc kubenswrapper[4783]: I1002 11:20:32.096354 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-88sw6\" (UniqueName: \"kubernetes.io/projected/0880686c-c2ed-4b67-80e8-61a1d9aaa0de-kube-api-access-88sw6\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-ph6fg\" (UID: \"0880686c-c2ed-4b67-80e8-61a1d9aaa0de\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ph6fg"
Oct 02 11:20:32 crc kubenswrapper[4783]: I1002 11:20:32.096352 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0880686c-c2ed-4b67-80e8-61a1d9aaa0de-repo-setup-combined-ca-bundle\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-ph6fg\" (UID: \"0880686c-c2ed-4b67-80e8-61a1d9aaa0de\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ph6fg"
Oct 02 11:20:32 crc kubenswrapper[4783]: I1002 11:20:32.096886 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0880686c-c2ed-4b67-80e8-61a1d9aaa0de-ssh-key\") pod \"repo-setup-edpm-deployment-openstack-edpm-ipam-ph6fg\" (UID: \"0880686c-c2ed-4b67-80e8-61a1d9aaa0de\") " pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ph6fg"
Oct 02 11:20:32 crc kubenswrapper[4783]: I1002 11:20:32.263729 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ph6fg"
Oct 02 11:20:33 crc kubenswrapper[4783]: I1002 11:20:33.545745 4783 scope.go:117] "RemoveContainer" containerID="a4f4b50638a10c4b177b4afe187ab7d697078645bdbd553938b22e713769fb04"
Oct 02 11:20:33 crc kubenswrapper[4783]: E1002 11:20:33.547247 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 11:20:34 crc kubenswrapper[4783]: W1002 11:20:34.802948 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0880686c_c2ed_4b67_80e8_61a1d9aaa0de.slice/crio-0225e305190f09bba2da3b3c55579c07fd2312f7f6f165566ff1e0f3393b3f25 WatchSource:0}: Error finding container 0225e305190f09bba2da3b3c55579c07fd2312f7f6f165566ff1e0f3393b3f25: Status 404 returned error can't find the container with id 0225e305190f09bba2da3b3c55579c07fd2312f7f6f165566ff1e0f3393b3f25
Oct 02 11:20:34 crc kubenswrapper[4783]: I1002 11:20:34.804043 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ph6fg"]
Oct 02 11:20:34 crc kubenswrapper[4783]: I1002 11:20:34.963591 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0"
Oct 02 11:20:35 crc kubenswrapper[4783]: I1002 11:20:35.662116 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0"
Oct 02 11:20:35 crc kubenswrapper[4783]: I1002 11:20:35.748855 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ph6fg" event={"ID":"0880686c-c2ed-4b67-80e8-61a1d9aaa0de","Type":"ContainerStarted","Data":"0225e305190f09bba2da3b3c55579c07fd2312f7f6f165566ff1e0f3393b3f25"}
Oct 02 11:20:46 crc kubenswrapper[4783]: I1002 11:20:46.545627 4783 scope.go:117] "RemoveContainer" containerID="a4f4b50638a10c4b177b4afe187ab7d697078645bdbd553938b22e713769fb04"
Oct 02 11:20:46 crc kubenswrapper[4783]: E1002 11:20:46.546319 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 11:20:50 crc kubenswrapper[4783]: I1002 11:20:50.758888 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Oct 02 11:20:51 crc kubenswrapper[4783]: I1002 11:20:51.902954 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ph6fg" event={"ID":"0880686c-c2ed-4b67-80e8-61a1d9aaa0de","Type":"ContainerStarted","Data":"a7e5211348734ebb25b9c7aaeca3f369077010413a72b512a5d95eb8d4d0d2ea"}
Oct 02 11:20:51 crc kubenswrapper[4783]: I1002 11:20:51.942088 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ph6fg" podStartSLOduration=4.991101823 podStartE2EDuration="20.942020697s" podCreationTimestamp="2025-10-02 11:20:31 +0000 UTC" firstStartedPulling="2025-10-02 11:20:34.805382803 +0000 UTC m=+1668.121577064" lastFinishedPulling="2025-10-02 11:20:50.756301677 +0000 UTC m=+1684.072495938" observedRunningTime="2025-10-02 11:20:51.924363334 +0000 UTC m=+1685.240557625" watchObservedRunningTime="2025-10-02 11:20:51.942020697 +0000 UTC m=+1685.258214968"
Oct 02 11:20:53 crc kubenswrapper[4783]: I1002 11:20:53.364179 4783 scope.go:117] "RemoveContainer" containerID="4266001ae8854435bf1bb79107b79c61b25ece67b50254a237dc0f2dba2d06bf"
Oct 02 11:20:53 crc kubenswrapper[4783]: I1002 11:20:53.413203 4783 scope.go:117] "RemoveContainer" containerID="617e73daab1fa7d04c236e9a25fd46b5542b5fb3258901a035760e38676e12d9"
Oct 02 11:20:57 crc kubenswrapper[4783]: I1002 11:20:57.561569 4783 scope.go:117] "RemoveContainer" containerID="a4f4b50638a10c4b177b4afe187ab7d697078645bdbd553938b22e713769fb04"
Oct 02 11:20:57 crc kubenswrapper[4783]: E1002 11:20:57.561981 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 11:21:04 crc kubenswrapper[4783]: I1002 11:21:04.014701 4783 generic.go:334] "Generic (PLEG): container finished" podID="0880686c-c2ed-4b67-80e8-61a1d9aaa0de" containerID="a7e5211348734ebb25b9c7aaeca3f369077010413a72b512a5d95eb8d4d0d2ea" exitCode=0
Oct 02 11:21:04 crc kubenswrapper[4783]: I1002 11:21:04.014770 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ph6fg" event={"ID":"0880686c-c2ed-4b67-80e8-61a1d9aaa0de","Type":"ContainerDied","Data":"a7e5211348734ebb25b9c7aaeca3f369077010413a72b512a5d95eb8d4d0d2ea"}
Oct 02 11:21:05 crc kubenswrapper[4783]: I1002 11:21:05.432615 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ph6fg"
Oct 02 11:21:05 crc kubenswrapper[4783]: I1002 11:21:05.539901 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-88sw6\" (UniqueName: \"kubernetes.io/projected/0880686c-c2ed-4b67-80e8-61a1d9aaa0de-kube-api-access-88sw6\") pod \"0880686c-c2ed-4b67-80e8-61a1d9aaa0de\" (UID: \"0880686c-c2ed-4b67-80e8-61a1d9aaa0de\") "
Oct 02 11:21:05 crc kubenswrapper[4783]: I1002 11:21:05.539987 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0880686c-c2ed-4b67-80e8-61a1d9aaa0de-repo-setup-combined-ca-bundle\") pod \"0880686c-c2ed-4b67-80e8-61a1d9aaa0de\" (UID: \"0880686c-c2ed-4b67-80e8-61a1d9aaa0de\") "
Oct 02 11:21:05 crc kubenswrapper[4783]: I1002 11:21:05.540089 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0880686c-c2ed-4b67-80e8-61a1d9aaa0de-ssh-key\") pod \"0880686c-c2ed-4b67-80e8-61a1d9aaa0de\" (UID: \"0880686c-c2ed-4b67-80e8-61a1d9aaa0de\") "
Oct 02 11:21:05 crc kubenswrapper[4783]: I1002 11:21:05.540151 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0880686c-c2ed-4b67-80e8-61a1d9aaa0de-inventory\") pod \"0880686c-c2ed-4b67-80e8-61a1d9aaa0de\" (UID: \"0880686c-c2ed-4b67-80e8-61a1d9aaa0de\") "
Oct 02 11:21:05 crc kubenswrapper[4783]: I1002 11:21:05.545841 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0880686c-c2ed-4b67-80e8-61a1d9aaa0de-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "0880686c-c2ed-4b67-80e8-61a1d9aaa0de" (UID: "0880686c-c2ed-4b67-80e8-61a1d9aaa0de"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:21:05 crc kubenswrapper[4783]: I1002 11:21:05.546438 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0880686c-c2ed-4b67-80e8-61a1d9aaa0de-kube-api-access-88sw6" (OuterVolumeSpecName: "kube-api-access-88sw6") pod "0880686c-c2ed-4b67-80e8-61a1d9aaa0de" (UID: "0880686c-c2ed-4b67-80e8-61a1d9aaa0de"). InnerVolumeSpecName "kube-api-access-88sw6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 02 11:21:05 crc kubenswrapper[4783]: I1002 11:21:05.572476 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0880686c-c2ed-4b67-80e8-61a1d9aaa0de-inventory" (OuterVolumeSpecName: "inventory") pod "0880686c-c2ed-4b67-80e8-61a1d9aaa0de" (UID: "0880686c-c2ed-4b67-80e8-61a1d9aaa0de"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:21:05 crc kubenswrapper[4783]: I1002 11:21:05.581935 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0880686c-c2ed-4b67-80e8-61a1d9aaa0de-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "0880686c-c2ed-4b67-80e8-61a1d9aaa0de" (UID: "0880686c-c2ed-4b67-80e8-61a1d9aaa0de"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:21:05 crc kubenswrapper[4783]: I1002 11:21:05.643092 4783 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0880686c-c2ed-4b67-80e8-61a1d9aaa0de-ssh-key\") on node \"crc\" DevicePath \"\""
Oct 02 11:21:05 crc kubenswrapper[4783]: I1002 11:21:05.643128 4783 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0880686c-c2ed-4b67-80e8-61a1d9aaa0de-inventory\") on node \"crc\" DevicePath \"\""
Oct 02 11:21:05 crc kubenswrapper[4783]: I1002 11:21:05.643142 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-88sw6\" (UniqueName: \"kubernetes.io/projected/0880686c-c2ed-4b67-80e8-61a1d9aaa0de-kube-api-access-88sw6\") on node \"crc\" DevicePath \"\""
Oct 02 11:21:05 crc kubenswrapper[4783]: I1002 11:21:05.643155 4783 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0880686c-c2ed-4b67-80e8-61a1d9aaa0de-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Oct 02 11:21:06 crc kubenswrapper[4783]: I1002 11:21:06.035794 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ph6fg" event={"ID":"0880686c-c2ed-4b67-80e8-61a1d9aaa0de","Type":"ContainerDied","Data":"0225e305190f09bba2da3b3c55579c07fd2312f7f6f165566ff1e0f3393b3f25"}
Oct 02 11:21:06 crc kubenswrapper[4783]: I1002 11:21:06.035834 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0225e305190f09bba2da3b3c55579c07fd2312f7f6f165566ff1e0f3393b3f25"
Oct 02 11:21:06 crc kubenswrapper[4783]: I1002 11:21:06.035895 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/repo-setup-edpm-deployment-openstack-edpm-ipam-ph6fg"
Oct 02 11:21:06 crc kubenswrapper[4783]: I1002 11:21:06.144508 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-qsqrg"]
Oct 02 11:21:06 crc kubenswrapper[4783]: E1002 11:21:06.145568 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0880686c-c2ed-4b67-80e8-61a1d9aaa0de" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam"
Oct 02 11:21:06 crc kubenswrapper[4783]: I1002 11:21:06.145592 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="0880686c-c2ed-4b67-80e8-61a1d9aaa0de" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam"
Oct 02 11:21:06 crc kubenswrapper[4783]: I1002 11:21:06.145820 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="0880686c-c2ed-4b67-80e8-61a1d9aaa0de" containerName="repo-setup-edpm-deployment-openstack-edpm-ipam"
Oct 02 11:21:06 crc kubenswrapper[4783]: I1002 11:21:06.146803 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-qsqrg"
Oct 02 11:21:06 crc kubenswrapper[4783]: I1002 11:21:06.152033 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-qsqrg"]
Oct 02 11:21:06 crc kubenswrapper[4783]: I1002 11:21:06.153444 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-n5lmz"
Oct 02 11:21:06 crc kubenswrapper[4783]: I1002 11:21:06.153649 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Oct 02 11:21:06 crc kubenswrapper[4783]: I1002 11:21:06.153707 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Oct 02 11:21:06 crc kubenswrapper[4783]: I1002 11:21:06.153657 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Oct 02 11:21:06 crc kubenswrapper[4783]: I1002 11:21:06.254850 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/55b494d4-5d17-4b36-bff6-75cad54634ac-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-qsqrg\" (UID: \"55b494d4-5d17-4b36-bff6-75cad54634ac\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-qsqrg"
Oct 02 11:21:06 crc kubenswrapper[4783]: I1002 11:21:06.255135 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/55b494d4-5d17-4b36-bff6-75cad54634ac-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-qsqrg\" (UID: \"55b494d4-5d17-4b36-bff6-75cad54634ac\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-qsqrg"
Oct 02 11:21:06 crc kubenswrapper[4783]: I1002 11:21:06.255258 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-csjk4\" (UniqueName: \"kubernetes.io/projected/55b494d4-5d17-4b36-bff6-75cad54634ac-kube-api-access-csjk4\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-qsqrg\" (UID: \"55b494d4-5d17-4b36-bff6-75cad54634ac\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-qsqrg"
Oct 02 11:21:06 crc kubenswrapper[4783]: I1002 11:21:06.356782 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/55b494d4-5d17-4b36-bff6-75cad54634ac-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-qsqrg\" (UID: \"55b494d4-5d17-4b36-bff6-75cad54634ac\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-qsqrg"
Oct 02 11:21:06 crc kubenswrapper[4783]: I1002 11:21:06.356869 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/55b494d4-5d17-4b36-bff6-75cad54634ac-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-qsqrg\" (UID: \"55b494d4-5d17-4b36-bff6-75cad54634ac\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-qsqrg"
Oct 02 11:21:06 crc kubenswrapper[4783]: I1002 11:21:06.356907 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-csjk4\" (UniqueName: \"kubernetes.io/projected/55b494d4-5d17-4b36-bff6-75cad54634ac-kube-api-access-csjk4\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-qsqrg\" (UID: \"55b494d4-5d17-4b36-bff6-75cad54634ac\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-qsqrg"
Oct 02 11:21:06 crc kubenswrapper[4783]: I1002 11:21:06.362791 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/55b494d4-5d17-4b36-bff6-75cad54634ac-inventory\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-qsqrg\" (UID: \"55b494d4-5d17-4b36-bff6-75cad54634ac\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-qsqrg"
Oct 02 11:21:06 crc kubenswrapper[4783]: I1002 11:21:06.363205 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/55b494d4-5d17-4b36-bff6-75cad54634ac-ssh-key\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-qsqrg\" (UID: \"55b494d4-5d17-4b36-bff6-75cad54634ac\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-qsqrg"
Oct 02 11:21:06 crc kubenswrapper[4783]: I1002 11:21:06.372984 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-csjk4\" (UniqueName: \"kubernetes.io/projected/55b494d4-5d17-4b36-bff6-75cad54634ac-kube-api-access-csjk4\") pod \"redhat-edpm-deployment-openstack-edpm-ipam-qsqrg\" (UID: \"55b494d4-5d17-4b36-bff6-75cad54634ac\") " pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-qsqrg"
Oct 02 11:21:06 crc kubenswrapper[4783]: I1002 11:21:06.473564 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-qsqrg"
Oct 02 11:21:07 crc kubenswrapper[4783]: I1002 11:21:07.015166 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/redhat-edpm-deployment-openstack-edpm-ipam-qsqrg"]
Oct 02 11:21:07 crc kubenswrapper[4783]: I1002 11:21:07.021093 4783 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Oct 02 11:21:07 crc kubenswrapper[4783]: I1002 11:21:07.046722 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-qsqrg" event={"ID":"55b494d4-5d17-4b36-bff6-75cad54634ac","Type":"ContainerStarted","Data":"5eb6da92a0fe035a9cb2f0eea58680f61bc21a7f9cb12e81dd0b138c4a6f2bd8"}
Oct 02 11:21:08 crc kubenswrapper[4783]: I1002 11:21:08.056976 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-qsqrg" event={"ID":"55b494d4-5d17-4b36-bff6-75cad54634ac","Type":"ContainerStarted","Data":"37fe57c16996516a0f3ab605df39a2314c782bc31193a5bdfc9b1cd64767cbdf"}
Oct 02 11:21:08 crc kubenswrapper[4783]: I1002 11:21:08.082993 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-qsqrg" podStartSLOduration=1.862803679 podStartE2EDuration="2.082973447s" podCreationTimestamp="2025-10-02 11:21:06 +0000 UTC" firstStartedPulling="2025-10-02 11:21:07.020836674 +0000 UTC m=+1700.337030935" lastFinishedPulling="2025-10-02 11:21:07.241006442 +0000 UTC m=+1700.557200703" observedRunningTime="2025-10-02 11:21:08.07687617 +0000 UTC m=+1701.393070431" watchObservedRunningTime="2025-10-02 11:21:08.082973447 +0000 UTC m=+1701.399167718"
Oct 02 11:21:08 crc kubenswrapper[4783]: I1002 11:21:08.545667 4783 scope.go:117] "RemoveContainer" containerID="a4f4b50638a10c4b177b4afe187ab7d697078645bdbd553938b22e713769fb04"
Oct 02 11:21:08 crc kubenswrapper[4783]: E1002 11:21:08.545948 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 11:21:10 crc kubenswrapper[4783]: I1002 11:21:10.084537 4783 generic.go:334] "Generic (PLEG): container finished" podID="55b494d4-5d17-4b36-bff6-75cad54634ac" containerID="37fe57c16996516a0f3ab605df39a2314c782bc31193a5bdfc9b1cd64767cbdf" exitCode=0
Oct 02 11:21:10 crc kubenswrapper[4783]: I1002 11:21:10.085098 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-qsqrg" event={"ID":"55b494d4-5d17-4b36-bff6-75cad54634ac","Type":"ContainerDied","Data":"37fe57c16996516a0f3ab605df39a2314c782bc31193a5bdfc9b1cd64767cbdf"}
Oct 02 11:21:11 crc kubenswrapper[4783]: I1002 11:21:11.508517 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-qsqrg"
Oct 02 11:21:11 crc kubenswrapper[4783]: I1002 11:21:11.653664 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/55b494d4-5d17-4b36-bff6-75cad54634ac-ssh-key\") pod \"55b494d4-5d17-4b36-bff6-75cad54634ac\" (UID: \"55b494d4-5d17-4b36-bff6-75cad54634ac\") "
Oct 02 11:21:11 crc kubenswrapper[4783]: I1002 11:21:11.654164 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/55b494d4-5d17-4b36-bff6-75cad54634ac-inventory\") pod \"55b494d4-5d17-4b36-bff6-75cad54634ac\" (UID: \"55b494d4-5d17-4b36-bff6-75cad54634ac\") "
Oct 02 11:21:11 crc kubenswrapper[4783]: I1002 11:21:11.654913 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-csjk4\" (UniqueName: \"kubernetes.io/projected/55b494d4-5d17-4b36-bff6-75cad54634ac-kube-api-access-csjk4\") pod \"55b494d4-5d17-4b36-bff6-75cad54634ac\" (UID: \"55b494d4-5d17-4b36-bff6-75cad54634ac\") "
Oct 02 11:21:11 crc kubenswrapper[4783]: I1002 11:21:11.661805 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/55b494d4-5d17-4b36-bff6-75cad54634ac-kube-api-access-csjk4" (OuterVolumeSpecName: "kube-api-access-csjk4") pod "55b494d4-5d17-4b36-bff6-75cad54634ac" (UID: "55b494d4-5d17-4b36-bff6-75cad54634ac"). InnerVolumeSpecName "kube-api-access-csjk4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 02 11:21:11 crc kubenswrapper[4783]: I1002 11:21:11.686583 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55b494d4-5d17-4b36-bff6-75cad54634ac-inventory" (OuterVolumeSpecName: "inventory") pod "55b494d4-5d17-4b36-bff6-75cad54634ac" (UID: "55b494d4-5d17-4b36-bff6-75cad54634ac"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:21:11 crc kubenswrapper[4783]: I1002 11:21:11.687472 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/55b494d4-5d17-4b36-bff6-75cad54634ac-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "55b494d4-5d17-4b36-bff6-75cad54634ac" (UID: "55b494d4-5d17-4b36-bff6-75cad54634ac"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:21:11 crc kubenswrapper[4783]: I1002 11:21:11.759065 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-csjk4\" (UniqueName: \"kubernetes.io/projected/55b494d4-5d17-4b36-bff6-75cad54634ac-kube-api-access-csjk4\") on node \"crc\" DevicePath \"\""
Oct 02 11:21:11 crc kubenswrapper[4783]: I1002 11:21:11.759116 4783 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/55b494d4-5d17-4b36-bff6-75cad54634ac-ssh-key\") on node \"crc\" DevicePath \"\""
Oct 02 11:21:11 crc kubenswrapper[4783]: I1002 11:21:11.759129 4783 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/55b494d4-5d17-4b36-bff6-75cad54634ac-inventory\") on node \"crc\" DevicePath \"\""
Oct 02 11:21:12 crc kubenswrapper[4783]: I1002 11:21:12.115043 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-qsqrg" event={"ID":"55b494d4-5d17-4b36-bff6-75cad54634ac","Type":"ContainerDied","Data":"5eb6da92a0fe035a9cb2f0eea58680f61bc21a7f9cb12e81dd0b138c4a6f2bd8"}
Oct 02 11:21:12 crc kubenswrapper[4783]: I1002 11:21:12.115086 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5eb6da92a0fe035a9cb2f0eea58680f61bc21a7f9cb12e81dd0b138c4a6f2bd8"
Oct 02 11:21:12 crc kubenswrapper[4783]: I1002 11:21:12.115161 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/redhat-edpm-deployment-openstack-edpm-ipam-qsqrg"
Oct 02 11:21:12 crc kubenswrapper[4783]: I1002 11:21:12.217857 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9mr2x"]
Oct 02 11:21:12 crc kubenswrapper[4783]: E1002 11:21:12.218404 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55b494d4-5d17-4b36-bff6-75cad54634ac" containerName="redhat-edpm-deployment-openstack-edpm-ipam"
Oct 02 11:21:12 crc kubenswrapper[4783]: I1002 11:21:12.218459 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="55b494d4-5d17-4b36-bff6-75cad54634ac" containerName="redhat-edpm-deployment-openstack-edpm-ipam"
Oct 02 11:21:12 crc kubenswrapper[4783]: I1002 11:21:12.218804 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="55b494d4-5d17-4b36-bff6-75cad54634ac" containerName="redhat-edpm-deployment-openstack-edpm-ipam"
Oct 02 11:21:12 crc kubenswrapper[4783]: I1002 11:21:12.219889 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9mr2x"
Oct 02 11:21:12 crc kubenswrapper[4783]: I1002 11:21:12.223904 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Oct 02 11:21:12 crc kubenswrapper[4783]: I1002 11:21:12.223970 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-n5lmz"
Oct 02 11:21:12 crc kubenswrapper[4783]: I1002 11:21:12.223971 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Oct 02 11:21:12 crc kubenswrapper[4783]: I1002 11:21:12.224052 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Oct 02 11:21:12 crc kubenswrapper[4783]: I1002 11:21:12.234430 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9mr2x"]
Oct 02 11:21:12 crc kubenswrapper[4783]: I1002 11:21:12.269136 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/eca56991-989e-44fc-9bb6-ee52ef352d73-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-9mr2x\" (UID: \"eca56991-989e-44fc-9bb6-ee52ef352d73\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9mr2x"
Oct 02 11:21:12 crc kubenswrapper[4783]: I1002 11:21:12.269252 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mpf98\" (UniqueName: \"kubernetes.io/projected/eca56991-989e-44fc-9bb6-ee52ef352d73-kube-api-access-mpf98\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-9mr2x\" (UID: \"eca56991-989e-44fc-9bb6-ee52ef352d73\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9mr2x"
Oct 02 11:21:12 crc kubenswrapper[4783]: I1002 11:21:12.269316 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eca56991-989e-44fc-9bb6-ee52ef352d73-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-9mr2x\" (UID: \"eca56991-989e-44fc-9bb6-ee52ef352d73\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9mr2x"
Oct 02 11:21:12 crc kubenswrapper[4783]: I1002 11:21:12.269442 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/eca56991-989e-44fc-9bb6-ee52ef352d73-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-9mr2x\" (UID: \"eca56991-989e-44fc-9bb6-ee52ef352d73\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9mr2x"
Oct 02 11:21:12 crc kubenswrapper[4783]: I1002 11:21:12.370912 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/eca56991-989e-44fc-9bb6-ee52ef352d73-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-9mr2x\" (UID: \"eca56991-989e-44fc-9bb6-ee52ef352d73\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9mr2x"
Oct 02 11:21:12 crc kubenswrapper[4783]: I1002 11:21:12.370972 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/eca56991-989e-44fc-9bb6-ee52ef352d73-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-9mr2x\" (UID: 
\"eca56991-989e-44fc-9bb6-ee52ef352d73\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9mr2x" Oct 02 11:21:12 crc kubenswrapper[4783]: I1002 11:21:12.371032 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mpf98\" (UniqueName: \"kubernetes.io/projected/eca56991-989e-44fc-9bb6-ee52ef352d73-kube-api-access-mpf98\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-9mr2x\" (UID: \"eca56991-989e-44fc-9bb6-ee52ef352d73\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9mr2x" Oct 02 11:21:12 crc kubenswrapper[4783]: I1002 11:21:12.371103 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eca56991-989e-44fc-9bb6-ee52ef352d73-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-9mr2x\" (UID: \"eca56991-989e-44fc-9bb6-ee52ef352d73\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9mr2x" Oct 02 11:21:12 crc kubenswrapper[4783]: I1002 11:21:12.375467 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/eca56991-989e-44fc-9bb6-ee52ef352d73-ssh-key\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-9mr2x\" (UID: \"eca56991-989e-44fc-9bb6-ee52ef352d73\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9mr2x" Oct 02 11:21:12 crc kubenswrapper[4783]: I1002 11:21:12.376091 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eca56991-989e-44fc-9bb6-ee52ef352d73-bootstrap-combined-ca-bundle\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-9mr2x\" (UID: \"eca56991-989e-44fc-9bb6-ee52ef352d73\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9mr2x" Oct 02 11:21:12 crc kubenswrapper[4783]: I1002 11:21:12.376375 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/eca56991-989e-44fc-9bb6-ee52ef352d73-inventory\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-9mr2x\" (UID: \"eca56991-989e-44fc-9bb6-ee52ef352d73\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9mr2x" Oct 02 11:21:12 crc kubenswrapper[4783]: I1002 11:21:12.389191 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mpf98\" (UniqueName: \"kubernetes.io/projected/eca56991-989e-44fc-9bb6-ee52ef352d73-kube-api-access-mpf98\") pod \"bootstrap-edpm-deployment-openstack-edpm-ipam-9mr2x\" (UID: \"eca56991-989e-44fc-9bb6-ee52ef352d73\") " pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9mr2x" Oct 02 11:21:12 crc kubenswrapper[4783]: I1002 11:21:12.557681 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9mr2x" Oct 02 11:21:13 crc kubenswrapper[4783]: I1002 11:21:13.084364 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9mr2x"] Oct 02 11:21:13 crc kubenswrapper[4783]: I1002 11:21:13.125164 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9mr2x" event={"ID":"eca56991-989e-44fc-9bb6-ee52ef352d73","Type":"ContainerStarted","Data":"c511133d88f227e3d77978d8a641419c66343fd8c08efc457deeee8ba46fee9b"} Oct 02 11:21:14 crc kubenswrapper[4783]: I1002 11:21:14.137346 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9mr2x" event={"ID":"eca56991-989e-44fc-9bb6-ee52ef352d73","Type":"ContainerStarted","Data":"a6a2465e6217bf98b7bd27a0288ef1757bf59d0121a2a381bd8a43e62470084c"} Oct 02 11:21:22 crc kubenswrapper[4783]: I1002 11:21:22.545728 4783 scope.go:117] "RemoveContainer" containerID="a4f4b50638a10c4b177b4afe187ab7d697078645bdbd553938b22e713769fb04" Oct 02 11:21:22 crc kubenswrapper[4783]: E1002 11:21:22.546585 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:21:31 crc kubenswrapper[4783]: I1002 11:21:31.585619 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/metallb-operator-controller-manager-c4c4dd5fd-sqv6n" podUID="624d75e0-0672-4797-8791-25096bfbf553" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.49:8080/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 02 11:21:32 crc kubenswrapper[4783]: I1002 11:21:32.644836 4783 patch_prober.go:28] interesting pod/route-controller-manager-688bd74c48-7v6qj container/route-controller-manager namespace/openshift-route-controller-manager: Liveness probe status=failure output="Get \"https://10.217.0.64:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 02 11:21:32 crc kubenswrapper[4783]: I1002 11:21:32.645147 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-route-controller-manager/route-controller-manager-688bd74c48-7v6qj" podUID="df229baf-94b2-4d0c-a109-e679b8522f7b" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.64:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Oct 02 11:21:34 crc kubenswrapper[4783]: I1002 11:21:34.546118 4783 scope.go:117] "RemoveContainer" containerID="a4f4b50638a10c4b177b4afe187ab7d697078645bdbd553938b22e713769fb04" Oct 02 11:21:34 crc kubenswrapper[4783]: E1002 11:21:34.546627 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:21:37 crc kubenswrapper[4783]: I1002 11:21:37.826000 4783 patch_prober.go:28] interesting pod/oauth-openshift-55c8c74798-mhwv9 container/oauth-openshift namespace/openshift-authentication: Liveness probe status=failure output="Get \"https://10.217.0.60:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 02 11:21:37 crc kubenswrapper[4783]: I1002 11:21:37.826449 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-authentication/oauth-openshift-55c8c74798-mhwv9" podUID="4f45009b-78a2-4b42-882c-bcb88825f343" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.60:6443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Oct 02 11:21:41 crc kubenswrapper[4783]: I1002 11:21:41.446516 4783 patch_prober.go:28] interesting pod/console-operator-58897d9998-x9scv container/console-operator namespace/openshift-console-operator: Liveness probe status=failure output="Get \"https://10.217.0.24:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 02 11:21:41 crc kubenswrapper[4783]: I1002 11:21:41.446853 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console-operator/console-operator-58897d9998-x9scv" podUID="12e7e4aa-75cb-41c1-8d03-8eea90096e8c" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.24:8443/healthz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 02 11:21:41 crc kubenswrapper[4783]: I1002 11:21:41.446579 4783 patch_prober.go:28] interesting pod/console-operator-58897d9998-x9scv container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.24:8443/readyz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 02 11:21:41 crc kubenswrapper[4783]: I1002 11:21:41.446905 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-x9scv" podUID="12e7e4aa-75cb-41c1-8d03-8eea90096e8c" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.24:8443/readyz\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Oct 02 11:21:41 crc kubenswrapper[4783]: I1002 11:21:41.586604 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="metallb-system/metallb-operator-controller-manager-c4c4dd5fd-sqv6n" podUID="624d75e0-0672-4797-8791-25096bfbf553" containerName="manager" probeResult="failure" output="Get \"http://10.217.0.49:8080/readyz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 02 11:21:42 crc kubenswrapper[4783]: I1002 11:21:42.339162 4783 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-js7h8 container/olm-operator namespace/openshift-operator-lifecycle-manager: Liveness probe status=failure output="Get \"https://10.217.0.22:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 02 11:21:42 crc kubenswrapper[4783]: I1002 11:21:42.339218 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-js7h8" 
podUID="33db5790-d9e7-4599-b8c7-7578ccb9940a" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.22:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Oct 02 11:21:42 crc kubenswrapper[4783]: I1002 11:21:42.339163 4783 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-js7h8 container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.22:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 02 11:21:42 crc kubenswrapper[4783]: I1002 11:21:42.339363 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-js7h8" podUID="33db5790-d9e7-4599-b8c7-7578ccb9940a" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.22:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Oct 02 11:21:42 crc kubenswrapper[4783]: I1002 11:21:42.645244 4783 patch_prober.go:28] interesting pod/route-controller-manager-688bd74c48-7v6qj container/route-controller-manager namespace/openshift-route-controller-manager: Liveness probe status=failure output="Get \"https://10.217.0.64:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 02 11:21:42 crc kubenswrapper[4783]: I1002 11:21:42.645308 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-route-controller-manager/route-controller-manager-688bd74c48-7v6qj" podUID="df229baf-94b2-4d0c-a109-e679b8522f7b" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.64:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 02 11:21:43 crc kubenswrapper[4783]: I1002 11:21:43.892558 4783 patch_prober.go:28] interesting pod/dns-default-8vjpx container/dns namespace/openshift-dns: Readiness probe status=failure output="Get \"http://10.217.0.43:8181/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Oct 02 11:21:43 crc kubenswrapper[4783]: I1002 11:21:43.892892 4783 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-dns/dns-default-8vjpx" podUID="9675dc45-ac16-4440-a4b1-2c3cfeff2459" containerName="dns" probeResult="failure" output="Get \"http://10.217.0.43:8181/ready\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Oct 02 11:21:44 crc kubenswrapper[4783]: I1002 11:21:44.663341 4783 trace.go:236] Trace[1716097480]: "Calculate volume metrics of catalog-content for pod openshift-marketplace/redhat-operators-xqljq" (02-Oct-2025 11:21:31.137) (total time: 13526ms): Oct 02 11:21:44 crc kubenswrapper[4783]: Trace[1716097480]: [13.526086616s] [13.526086616s] END Oct 02 11:21:49 crc kubenswrapper[4783]: I1002 11:21:49.545319 4783 scope.go:117] "RemoveContainer" containerID="a4f4b50638a10c4b177b4afe187ab7d697078645bdbd553938b22e713769fb04" Oct 02 11:21:49 crc kubenswrapper[4783]: E1002 11:21:49.546161 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:22:03 crc kubenswrapper[4783]: I1002 11:22:03.545563 4783 scope.go:117] "RemoveContainer" containerID="a4f4b50638a10c4b177b4afe187ab7d697078645bdbd553938b22e713769fb04" Oct 02 11:22:03 crc kubenswrapper[4783]: E1002 11:22:03.546321 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:22:17 crc kubenswrapper[4783]: I1002 11:22:17.553951 4783 scope.go:117] "RemoveContainer" containerID="a4f4b50638a10c4b177b4afe187ab7d697078645bdbd553938b22e713769fb04" Oct 02 11:22:17 crc kubenswrapper[4783]: E1002 11:22:17.554897 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:22:29 crc kubenswrapper[4783]: I1002 11:22:29.545948 4783 scope.go:117] "RemoveContainer" containerID="a4f4b50638a10c4b177b4afe187ab7d697078645bdbd553938b22e713769fb04" Oct 02 11:22:29 crc kubenswrapper[4783]: E1002 11:22:29.546954 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:22:41 crc kubenswrapper[4783]: I1002 11:22:41.545202 4783 scope.go:117] "RemoveContainer" containerID="a4f4b50638a10c4b177b4afe187ab7d697078645bdbd553938b22e713769fb04" Oct 02 11:22:41 crc kubenswrapper[4783]: E1002 11:22:41.546201 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:22:53 crc kubenswrapper[4783]: I1002 11:22:53.545539 4783 scope.go:117] "RemoveContainer" containerID="a4f4b50638a10c4b177b4afe187ab7d697078645bdbd553938b22e713769fb04" Oct 02 11:22:53 crc kubenswrapper[4783]: E1002 11:22:53.546349 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:23:06 crc kubenswrapper[4783]: I1002 11:23:06.035768 4783 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9mr2x" podStartSLOduration=113.840441657 podStartE2EDuration="1m54.035750845s" podCreationTimestamp="2025-10-02 11:21:12 +0000 UTC" firstStartedPulling="2025-10-02 11:21:13.101825339 +0000 UTC m=+1706.418019600" lastFinishedPulling="2025-10-02 11:21:13.297134527 +0000 UTC m=+1706.613328788" observedRunningTime="2025-10-02 11:21:14.158485981 +0000 UTC m=+1707.474680242" watchObservedRunningTime="2025-10-02 11:23:06.035750845 +0000 UTC m=+1819.351945106" Oct 02 11:23:06 crc kubenswrapper[4783]: I1002 11:23:06.042280 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-wqbkd"] Oct 02 11:23:06 crc kubenswrapper[4783]: I1002 11:23:06.050835 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-wqbkd"] Oct 02 11:23:07 crc kubenswrapper[4783]: I1002 11:23:07.026225 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-ql48j"] Oct 02 11:23:07 crc kubenswrapper[4783]: I1002 11:23:07.033497 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-ql48j"] Oct 02 11:23:07 crc kubenswrapper[4783]: I1002 11:23:07.563448 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ad34849d-3626-40fd-9ed2-a0c3064d12d0" path="/var/lib/kubelet/pods/ad34849d-3626-40fd-9ed2-a0c3064d12d0/volumes" Oct 02 11:23:07 crc kubenswrapper[4783]: I1002 11:23:07.564750 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b0930159-862f-47a8-9516-5b9aaf532653" path="/var/lib/kubelet/pods/b0930159-862f-47a8-9516-5b9aaf532653/volumes" Oct 02 11:23:08 crc kubenswrapper[4783]: I1002 11:23:08.544500 4783 scope.go:117] "RemoveContainer" containerID="a4f4b50638a10c4b177b4afe187ab7d697078645bdbd553938b22e713769fb04" Oct 02 11:23:08 crc kubenswrapper[4783]: E1002 11:23:08.545022 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:23:10 crc kubenswrapper[4783]: I1002 11:23:10.044575 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-d6dk9"] Oct 02 11:23:10 crc kubenswrapper[4783]: I1002 11:23:10.057542 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-d6dk9"] Oct 02 11:23:11 crc kubenswrapper[4783]: I1002 11:23:11.558131 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a4e12345-bf38-4e17-bcc3-4e507b73e9b8" path="/var/lib/kubelet/pods/a4e12345-bf38-4e17-bcc3-4e507b73e9b8/volumes" Oct 02 11:23:14 crc kubenswrapper[4783]: I1002 11:23:14.047813 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-vndjn"] Oct 02 11:23:14 crc kubenswrapper[4783]: I1002 11:23:14.060730 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-pqd5f"] Oct 02 11:23:14 crc kubenswrapper[4783]: I1002 11:23:14.070125 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-vndjn"] Oct 02 11:23:14 crc kubenswrapper[4783]: I1002 11:23:14.077715 4783 kubelet.go:2431] "SyncLoop REMOVE" 
source="api" pods=["openstack/neutron-db-create-pqd5f"] Oct 02 11:23:14 crc kubenswrapper[4783]: I1002 11:23:14.086480 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-2btm9"] Oct 02 11:23:14 crc kubenswrapper[4783]: I1002 11:23:14.095564 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-2btm9"] Oct 02 11:23:15 crc kubenswrapper[4783]: I1002 11:23:15.561331 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="17badb90-21e0-40b3-b28f-15424c38ca7a" path="/var/lib/kubelet/pods/17badb90-21e0-40b3-b28f-15424c38ca7a/volumes" Oct 02 11:23:15 crc kubenswrapper[4783]: I1002 11:23:15.562586 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="546476d1-d7f2-42d1-8cf0-dff8dbaa5f10" path="/var/lib/kubelet/pods/546476d1-d7f2-42d1-8cf0-dff8dbaa5f10/volumes" Oct 02 11:23:15 crc kubenswrapper[4783]: I1002 11:23:15.563634 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd068d52-b7f9-4d38-80bf-941cac6d3b39" path="/var/lib/kubelet/pods/cd068d52-b7f9-4d38-80bf-941cac6d3b39/volumes" Oct 02 11:23:19 crc kubenswrapper[4783]: I1002 11:23:19.545564 4783 scope.go:117] "RemoveContainer" containerID="a4f4b50638a10c4b177b4afe187ab7d697078645bdbd553938b22e713769fb04" Oct 02 11:23:19 crc kubenswrapper[4783]: E1002 11:23:19.546454 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:23:20 crc kubenswrapper[4783]: I1002 11:23:20.030818 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-da86-account-create-d4vxn"] Oct 02 11:23:20 crc kubenswrapper[4783]: I1002 11:23:20.042761 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-da86-account-create-d4vxn"] Oct 02 11:23:21 crc kubenswrapper[4783]: I1002 11:23:21.564630 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="edb9f3db-8908-4bd6-a86e-9a46247458e7" path="/var/lib/kubelet/pods/edb9f3db-8908-4bd6-a86e-9a46247458e7/volumes" Oct 02 11:23:24 crc kubenswrapper[4783]: I1002 11:23:24.028809 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-a166-account-create-h6f94"] Oct 02 11:23:24 crc kubenswrapper[4783]: I1002 11:23:24.037981 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-98ca-account-create-l5h9r"] Oct 02 11:23:24 crc kubenswrapper[4783]: I1002 11:23:24.051286 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-a166-account-create-h6f94"] Oct 02 11:23:24 crc kubenswrapper[4783]: I1002 11:23:24.060690 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-98ca-account-create-l5h9r"] Oct 02 11:23:25 crc kubenswrapper[4783]: I1002 11:23:25.556261 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4490b7d0-ae49-4417-b778-2050301901bc" path="/var/lib/kubelet/pods/4490b7d0-ae49-4417-b778-2050301901bc/volumes" Oct 02 11:23:25 crc kubenswrapper[4783]: I1002 11:23:25.557047 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="df261fe1-316c-4f6c-828f-b0668ed6c1ee" 
path="/var/lib/kubelet/pods/df261fe1-316c-4f6c-828f-b0668ed6c1ee/volumes" Oct 02 11:23:27 crc kubenswrapper[4783]: I1002 11:23:27.029645 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-a382-account-create-2ccl8"] Oct 02 11:23:27 crc kubenswrapper[4783]: I1002 11:23:27.037567 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-fea5-account-create-hh5rg"] Oct 02 11:23:27 crc kubenswrapper[4783]: I1002 11:23:27.045658 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-a382-account-create-2ccl8"] Oct 02 11:23:27 crc kubenswrapper[4783]: I1002 11:23:27.062306 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-3b79-account-create-pkl5b"] Oct 02 11:23:27 crc kubenswrapper[4783]: I1002 11:23:27.067113 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-3b79-account-create-pkl5b"] Oct 02 11:23:27 crc kubenswrapper[4783]: I1002 11:23:27.076705 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-fea5-account-create-hh5rg"] Oct 02 11:23:27 crc kubenswrapper[4783]: I1002 11:23:27.561187 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d0e3dc9-58c4-489e-aef0-462f7ba245ca" path="/var/lib/kubelet/pods/1d0e3dc9-58c4-489e-aef0-462f7ba245ca/volumes" Oct 02 11:23:27 crc kubenswrapper[4783]: I1002 11:23:27.562088 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7600bb43-28bc-4d04-9fb5-6dcef8b0841f" path="/var/lib/kubelet/pods/7600bb43-28bc-4d04-9fb5-6dcef8b0841f/volumes" Oct 02 11:23:27 crc kubenswrapper[4783]: I1002 11:23:27.563048 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f366b995-e9d1-4ddf-98a9-3278480dca51" path="/var/lib/kubelet/pods/f366b995-e9d1-4ddf-98a9-3278480dca51/volumes" Oct 02 11:23:30 crc kubenswrapper[4783]: I1002 11:23:30.545361 4783 scope.go:117] "RemoveContainer" containerID="a4f4b50638a10c4b177b4afe187ab7d697078645bdbd553938b22e713769fb04" Oct 02 11:23:30 crc kubenswrapper[4783]: E1002 11:23:30.546760 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:23:41 crc kubenswrapper[4783]: I1002 11:23:41.545523 4783 scope.go:117] "RemoveContainer" containerID="a4f4b50638a10c4b177b4afe187ab7d697078645bdbd553938b22e713769fb04" Oct 02 11:23:41 crc kubenswrapper[4783]: E1002 11:23:41.546293 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:23:53 crc kubenswrapper[4783]: I1002 11:23:53.544471 4783 scope.go:117] "RemoveContainer" containerID="a4f4b50638a10c4b177b4afe187ab7d697078645bdbd553938b22e713769fb04" Oct 02 11:23:53 crc kubenswrapper[4783]: I1002 11:23:53.562512 4783 scope.go:117] "RemoveContainer" containerID="f02fcc448c68e9a5c7c8e5fbc7ee7d25dd17bb540bc05d430e935ca6c555308a" Oct 02 
11:23:53 crc kubenswrapper[4783]: I1002 11:23:53.602022 4783 scope.go:117] "RemoveContainer" containerID="56e154c943c21f19bf8e2daf9bca77372df526985a61171ab96ef43dcd649bd9" Oct 02 11:23:53 crc kubenswrapper[4783]: I1002 11:23:53.663672 4783 scope.go:117] "RemoveContainer" containerID="1000a4525ec9a400caaaa8778c9ec543673e87d9d8e0497e58b90fdc691b7af3" Oct 02 11:23:53 crc kubenswrapper[4783]: I1002 11:23:53.701872 4783 scope.go:117] "RemoveContainer" containerID="7069db6b24197a2f8b61e3892bfb81505504fe6a7ca3d8736e6e7c76df8bda29" Oct 02 11:23:53 crc kubenswrapper[4783]: I1002 11:23:53.766206 4783 scope.go:117] "RemoveContainer" containerID="3743878d85cf7c490d69a9d93f05870314d6f007e054f6c244319497089995db" Oct 02 11:23:53 crc kubenswrapper[4783]: I1002 11:23:53.809239 4783 scope.go:117] "RemoveContainer" containerID="d90f99d0a30bf0b28248e4132694e61993f4f68d092135e789aa4df5612d61a3" Oct 02 11:23:53 crc kubenswrapper[4783]: I1002 11:23:53.831053 4783 scope.go:117] "RemoveContainer" containerID="37db03a9a688a2f5a8f4865484a0f4983d591e8228ca6164ceed8236d79d3189" Oct 02 11:23:53 crc kubenswrapper[4783]: I1002 11:23:53.856069 4783 scope.go:117] "RemoveContainer" containerID="e639993a15b5efac2876adbb33ea5e5cbb75aa4608deed6fcc8804e26a746f4c" Oct 02 11:23:53 crc kubenswrapper[4783]: I1002 11:23:53.878951 4783 scope.go:117] "RemoveContainer" containerID="b1b04c86772704658a7a4ed863917efd35af162ad1746fc8c961fed2211dfb77" Oct 02 11:23:53 crc kubenswrapper[4783]: I1002 11:23:53.901184 4783 scope.go:117] "RemoveContainer" containerID="a23a88861e89dccc770c76fd4dba478d3df409ad874799c840ca37eeae4e93bf" Oct 02 11:23:53 crc kubenswrapper[4783]: I1002 11:23:53.925877 4783 scope.go:117] "RemoveContainer" containerID="ab15a3cd7992b45c329572700c99718692ab18197b5fb99e472dd12d4fcae8ec" Oct 02 11:23:54 crc kubenswrapper[4783]: I1002 11:23:54.085914 4783 scope.go:117] "RemoveContainer" containerID="efde3595061895401dfcee0fad6ea588c8de561210a6305942021a2175e68bb1" Oct 02 11:23:54 crc kubenswrapper[4783]: I1002 11:23:54.114169 4783 scope.go:117] "RemoveContainer" containerID="fd8ad7d0041a27208a3f301f3b2bbab6b46af9b32b0db4d7d0ad2921efd4cb6c" Oct 02 11:23:54 crc kubenswrapper[4783]: I1002 11:23:54.665178 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" event={"ID":"3288cc82-59a8-408e-8b0e-b5255882b4fb","Type":"ContainerStarted","Data":"4273188ae108ec0f85de3e8dc29d684f9057cc5d4a801982cba2234f8872c085"} Oct 02 11:24:02 crc kubenswrapper[4783]: I1002 11:24:02.046671 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-qmrrk"] Oct 02 11:24:02 crc kubenswrapper[4783]: I1002 11:24:02.063258 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-qmrrk"] Oct 02 11:24:03 crc kubenswrapper[4783]: I1002 11:24:03.554276 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c0c6de12-9394-4f1e-b50b-c5b54e840d5d" path="/var/lib/kubelet/pods/c0c6de12-9394-4f1e-b50b-c5b54e840d5d/volumes" Oct 02 11:24:49 crc kubenswrapper[4783]: I1002 11:24:49.039153 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-jq2kf"] Oct 02 11:24:49 crc kubenswrapper[4783]: I1002 11:24:49.049980 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-jq2kf"] Oct 02 11:24:49 crc kubenswrapper[4783]: I1002 11:24:49.562739 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aea4b3d6-8814-424e-a0b0-2748b63f0bfd" 
path="/var/lib/kubelet/pods/aea4b3d6-8814-424e-a0b0-2748b63f0bfd/volumes" Oct 02 11:24:54 crc kubenswrapper[4783]: I1002 11:24:54.337672 4783 scope.go:117] "RemoveContainer" containerID="80df88a4dfe442fb63bd389e19aea355128a1f21d3dc524dcc1bbca21efa2ad4" Oct 02 11:24:54 crc kubenswrapper[4783]: I1002 11:24:54.371507 4783 scope.go:117] "RemoveContainer" containerID="2aaa87d75cd2f7f0c066b1784385c64cc0de6811f713c16422f6cb7a24d61add" Oct 02 11:24:54 crc kubenswrapper[4783]: I1002 11:24:54.413174 4783 scope.go:117] "RemoveContainer" containerID="3ff6335580e338590b57c72739a560c6ae28c99def50ffd05d6f6e11755467e6" Oct 02 11:24:54 crc kubenswrapper[4783]: I1002 11:24:54.492505 4783 scope.go:117] "RemoveContainer" containerID="a2d6719a5da2934cc78a8c36ada520d54abcf4bc71b1ec279ccabb2730face6b" Oct 02 11:24:56 crc kubenswrapper[4783]: I1002 11:24:56.172567 4783 generic.go:334] "Generic (PLEG): container finished" podID="eca56991-989e-44fc-9bb6-ee52ef352d73" containerID="a6a2465e6217bf98b7bd27a0288ef1757bf59d0121a2a381bd8a43e62470084c" exitCode=0 Oct 02 11:24:56 crc kubenswrapper[4783]: I1002 11:24:56.172691 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9mr2x" event={"ID":"eca56991-989e-44fc-9bb6-ee52ef352d73","Type":"ContainerDied","Data":"a6a2465e6217bf98b7bd27a0288ef1757bf59d0121a2a381bd8a43e62470084c"} Oct 02 11:24:57 crc kubenswrapper[4783]: I1002 11:24:57.585510 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9mr2x" Oct 02 11:24:57 crc kubenswrapper[4783]: I1002 11:24:57.693058 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mpf98\" (UniqueName: \"kubernetes.io/projected/eca56991-989e-44fc-9bb6-ee52ef352d73-kube-api-access-mpf98\") pod \"eca56991-989e-44fc-9bb6-ee52ef352d73\" (UID: \"eca56991-989e-44fc-9bb6-ee52ef352d73\") " Oct 02 11:24:57 crc kubenswrapper[4783]: I1002 11:24:57.693189 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eca56991-989e-44fc-9bb6-ee52ef352d73-bootstrap-combined-ca-bundle\") pod \"eca56991-989e-44fc-9bb6-ee52ef352d73\" (UID: \"eca56991-989e-44fc-9bb6-ee52ef352d73\") " Oct 02 11:24:57 crc kubenswrapper[4783]: I1002 11:24:57.693295 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/eca56991-989e-44fc-9bb6-ee52ef352d73-ssh-key\") pod \"eca56991-989e-44fc-9bb6-ee52ef352d73\" (UID: \"eca56991-989e-44fc-9bb6-ee52ef352d73\") " Oct 02 11:24:57 crc kubenswrapper[4783]: I1002 11:24:57.693354 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/eca56991-989e-44fc-9bb6-ee52ef352d73-inventory\") pod \"eca56991-989e-44fc-9bb6-ee52ef352d73\" (UID: \"eca56991-989e-44fc-9bb6-ee52ef352d73\") " Oct 02 11:24:57 crc kubenswrapper[4783]: I1002 11:24:57.698494 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eca56991-989e-44fc-9bb6-ee52ef352d73-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "eca56991-989e-44fc-9bb6-ee52ef352d73" (UID: "eca56991-989e-44fc-9bb6-ee52ef352d73"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:24:57 crc kubenswrapper[4783]: I1002 11:24:57.699579 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/eca56991-989e-44fc-9bb6-ee52ef352d73-kube-api-access-mpf98" (OuterVolumeSpecName: "kube-api-access-mpf98") pod "eca56991-989e-44fc-9bb6-ee52ef352d73" (UID: "eca56991-989e-44fc-9bb6-ee52ef352d73"). InnerVolumeSpecName "kube-api-access-mpf98". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:24:57 crc kubenswrapper[4783]: I1002 11:24:57.720935 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eca56991-989e-44fc-9bb6-ee52ef352d73-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "eca56991-989e-44fc-9bb6-ee52ef352d73" (UID: "eca56991-989e-44fc-9bb6-ee52ef352d73"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:24:57 crc kubenswrapper[4783]: I1002 11:24:57.730680 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/eca56991-989e-44fc-9bb6-ee52ef352d73-inventory" (OuterVolumeSpecName: "inventory") pod "eca56991-989e-44fc-9bb6-ee52ef352d73" (UID: "eca56991-989e-44fc-9bb6-ee52ef352d73"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:24:57 crc kubenswrapper[4783]: I1002 11:24:57.796011 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mpf98\" (UniqueName: \"kubernetes.io/projected/eca56991-989e-44fc-9bb6-ee52ef352d73-kube-api-access-mpf98\") on node \"crc\" DevicePath \"\"" Oct 02 11:24:57 crc kubenswrapper[4783]: I1002 11:24:57.796055 4783 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/eca56991-989e-44fc-9bb6-ee52ef352d73-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 11:24:57 crc kubenswrapper[4783]: I1002 11:24:57.796069 4783 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/eca56991-989e-44fc-9bb6-ee52ef352d73-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 02 11:24:57 crc kubenswrapper[4783]: I1002 11:24:57.796081 4783 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/eca56991-989e-44fc-9bb6-ee52ef352d73-inventory\") on node \"crc\" DevicePath \"\"" Oct 02 11:24:58 crc kubenswrapper[4783]: I1002 11:24:58.189888 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9mr2x" event={"ID":"eca56991-989e-44fc-9bb6-ee52ef352d73","Type":"ContainerDied","Data":"c511133d88f227e3d77978d8a641419c66343fd8c08efc457deeee8ba46fee9b"} Oct 02 11:24:58 crc kubenswrapper[4783]: I1002 11:24:58.190205 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c511133d88f227e3d77978d8a641419c66343fd8c08efc457deeee8ba46fee9b" Oct 02 11:24:58 crc kubenswrapper[4783]: I1002 11:24:58.189978 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/bootstrap-edpm-deployment-openstack-edpm-ipam-9mr2x" Oct 02 11:24:58 crc kubenswrapper[4783]: I1002 11:24:58.305467 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-579nn"] Oct 02 11:24:58 crc kubenswrapper[4783]: E1002 11:24:58.305920 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="eca56991-989e-44fc-9bb6-ee52ef352d73" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Oct 02 11:24:58 crc kubenswrapper[4783]: I1002 11:24:58.305947 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="eca56991-989e-44fc-9bb6-ee52ef352d73" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Oct 02 11:24:58 crc kubenswrapper[4783]: I1002 11:24:58.306235 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="eca56991-989e-44fc-9bb6-ee52ef352d73" containerName="bootstrap-edpm-deployment-openstack-edpm-ipam" Oct 02 11:24:58 crc kubenswrapper[4783]: I1002 11:24:58.307039 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-579nn" Oct 02 11:24:58 crc kubenswrapper[4783]: I1002 11:24:58.309482 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 02 11:24:58 crc kubenswrapper[4783]: I1002 11:24:58.309856 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 02 11:24:58 crc kubenswrapper[4783]: I1002 11:24:58.310446 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 02 11:24:58 crc kubenswrapper[4783]: I1002 11:24:58.310865 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-n5lmz" Oct 02 11:24:58 crc kubenswrapper[4783]: I1002 11:24:58.329442 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-579nn"] Oct 02 11:24:58 crc kubenswrapper[4783]: I1002 11:24:58.408953 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fsgzr\" (UniqueName: \"kubernetes.io/projected/064976fc-7c96-48ee-9a31-869b82f4b6da-kube-api-access-fsgzr\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-579nn\" (UID: \"064976fc-7c96-48ee-9a31-869b82f4b6da\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-579nn" Oct 02 11:24:58 crc kubenswrapper[4783]: I1002 11:24:58.409010 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/064976fc-7c96-48ee-9a31-869b82f4b6da-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-579nn\" (UID: \"064976fc-7c96-48ee-9a31-869b82f4b6da\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-579nn" Oct 02 11:24:58 crc kubenswrapper[4783]: I1002 11:24:58.409061 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/064976fc-7c96-48ee-9a31-869b82f4b6da-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-579nn\" (UID: \"064976fc-7c96-48ee-9a31-869b82f4b6da\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-579nn" Oct 02 11:24:58 crc kubenswrapper[4783]: I1002 11:24:58.510539 4783 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fsgzr\" (UniqueName: \"kubernetes.io/projected/064976fc-7c96-48ee-9a31-869b82f4b6da-kube-api-access-fsgzr\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-579nn\" (UID: \"064976fc-7c96-48ee-9a31-869b82f4b6da\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-579nn" Oct 02 11:24:58 crc kubenswrapper[4783]: I1002 11:24:58.510586 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/064976fc-7c96-48ee-9a31-869b82f4b6da-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-579nn\" (UID: \"064976fc-7c96-48ee-9a31-869b82f4b6da\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-579nn" Oct 02 11:24:58 crc kubenswrapper[4783]: I1002 11:24:58.510639 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/064976fc-7c96-48ee-9a31-869b82f4b6da-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-579nn\" (UID: \"064976fc-7c96-48ee-9a31-869b82f4b6da\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-579nn" Oct 02 11:24:58 crc kubenswrapper[4783]: I1002 11:24:58.516155 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/064976fc-7c96-48ee-9a31-869b82f4b6da-ssh-key\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-579nn\" (UID: \"064976fc-7c96-48ee-9a31-869b82f4b6da\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-579nn" Oct 02 11:24:58 crc kubenswrapper[4783]: I1002 11:24:58.526638 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/064976fc-7c96-48ee-9a31-869b82f4b6da-inventory\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-579nn\" (UID: \"064976fc-7c96-48ee-9a31-869b82f4b6da\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-579nn" Oct 02 11:24:58 crc kubenswrapper[4783]: I1002 11:24:58.529755 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fsgzr\" (UniqueName: \"kubernetes.io/projected/064976fc-7c96-48ee-9a31-869b82f4b6da-kube-api-access-fsgzr\") pod \"download-cache-edpm-deployment-openstack-edpm-ipam-579nn\" (UID: \"064976fc-7c96-48ee-9a31-869b82f4b6da\") " pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-579nn" Oct 02 11:24:58 crc kubenswrapper[4783]: I1002 11:24:58.625321 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-579nn" Oct 02 11:24:59 crc kubenswrapper[4783]: I1002 11:24:59.118210 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/download-cache-edpm-deployment-openstack-edpm-ipam-579nn"] Oct 02 11:24:59 crc kubenswrapper[4783]: I1002 11:24:59.198328 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-579nn" event={"ID":"064976fc-7c96-48ee-9a31-869b82f4b6da","Type":"ContainerStarted","Data":"ded8f3472bce9bcd4106b9a483c465c2d8813903240a8afbdb0235677f7a8661"} Oct 02 11:25:02 crc kubenswrapper[4783]: I1002 11:25:02.243004 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-579nn" event={"ID":"064976fc-7c96-48ee-9a31-869b82f4b6da","Type":"ContainerStarted","Data":"da43b095d64f2f1c07cefc5b26d30b5adc3601d7eb3c1a55e4c6212525e788e3"} Oct 02 11:25:02 crc kubenswrapper[4783]: I1002 11:25:02.268171 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-579nn" podStartSLOduration=2.481413347 podStartE2EDuration="4.268147104s" podCreationTimestamp="2025-10-02 11:24:58 +0000 UTC" firstStartedPulling="2025-10-02 11:24:59.129250806 +0000 UTC m=+1932.445445067" lastFinishedPulling="2025-10-02 11:25:00.915984553 +0000 UTC m=+1934.232178824" observedRunningTime="2025-10-02 11:25:02.261257302 +0000 UTC m=+1935.577451573" watchObservedRunningTime="2025-10-02 11:25:02.268147104 +0000 UTC m=+1935.584341365" Oct 02 11:25:27 crc kubenswrapper[4783]: I1002 11:25:27.039851 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-bw8mq"] Oct 02 11:25:27 crc kubenswrapper[4783]: I1002 11:25:27.046557 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-bw8mq"] Oct 02 11:25:27 crc kubenswrapper[4783]: I1002 11:25:27.557798 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2f856246-d4db-48e2-81ec-b756ceba0667" path="/var/lib/kubelet/pods/2f856246-d4db-48e2-81ec-b756ceba0667/volumes" Oct 02 11:25:31 crc kubenswrapper[4783]: I1002 11:25:31.025946 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-l9zch"] Oct 02 11:25:31 crc kubenswrapper[4783]: I1002 11:25:31.035679 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-l9zch"] Oct 02 11:25:31 crc kubenswrapper[4783]: I1002 11:25:31.556826 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6c42c3ba-c130-4b8d-940a-9aa134629554" path="/var/lib/kubelet/pods/6c42c3ba-c130-4b8d-940a-9aa134629554/volumes" Oct 02 11:25:39 crc kubenswrapper[4783]: I1002 11:25:39.030096 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-g9szh"] Oct 02 11:25:39 crc kubenswrapper[4783]: I1002 11:25:39.039720 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-g9szh"] Oct 02 11:25:39 crc kubenswrapper[4783]: I1002 11:25:39.559466 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="68c31bf9-b59a-43ed-bb74-9e6cc0bce703" path="/var/lib/kubelet/pods/68c31bf9-b59a-43ed-bb74-9e6cc0bce703/volumes" Oct 02 11:25:50 crc kubenswrapper[4783]: I1002 11:25:50.045363 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-bh7g4"] Oct 02 11:25:50 crc kubenswrapper[4783]: I1002 11:25:50.057508 
4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-sqsnf"] Oct 02 11:25:50 crc kubenswrapper[4783]: I1002 11:25:50.066716 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-bh7g4"] Oct 02 11:25:50 crc kubenswrapper[4783]: I1002 11:25:50.073847 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-sqsnf"] Oct 02 11:25:51 crc kubenswrapper[4783]: I1002 11:25:51.557565 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="479a79a2-f65b-443b-865a-bec4c138b978" path="/var/lib/kubelet/pods/479a79a2-f65b-443b-865a-bec4c138b978/volumes" Oct 02 11:25:51 crc kubenswrapper[4783]: I1002 11:25:51.558369 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9e4fb56d-2565-4383-a883-a0c1eae40cb4" path="/var/lib/kubelet/pods/9e4fb56d-2565-4383-a883-a0c1eae40cb4/volumes" Oct 02 11:25:54 crc kubenswrapper[4783]: I1002 11:25:54.599824 4783 scope.go:117] "RemoveContainer" containerID="d0393a0ce247d27be012699cdac67d87ae42e593c65a68d13ea245fd10b74bc7" Oct 02 11:25:54 crc kubenswrapper[4783]: I1002 11:25:54.642759 4783 scope.go:117] "RemoveContainer" containerID="17134fa1c0bb54795ef7e71f2b048d8f0c6857876242e93c430e358361994184" Oct 02 11:25:54 crc kubenswrapper[4783]: I1002 11:25:54.690503 4783 scope.go:117] "RemoveContainer" containerID="31ba9fc88407b90063a3b07e92a4f172cd98c0503845642ff22552a9d5ec1cdf" Oct 02 11:25:54 crc kubenswrapper[4783]: I1002 11:25:54.755230 4783 scope.go:117] "RemoveContainer" containerID="6065cae19bc7b3d54b1ea3462555fa77433d6d20d53fc623121834b0b2926859" Oct 02 11:25:54 crc kubenswrapper[4783]: I1002 11:25:54.825315 4783 scope.go:117] "RemoveContainer" containerID="3bc304ac88d2823182da9a993a1fdbce5020a427e3173b94a6214924e530d810" Oct 02 11:26:21 crc kubenswrapper[4783]: I1002 11:26:21.514165 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 02 11:26:21 crc kubenswrapper[4783]: I1002 11:26:21.514936 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 02 11:26:48 crc kubenswrapper[4783]: I1002 11:26:48.156510 4783 generic.go:334] "Generic (PLEG): container finished" podID="064976fc-7c96-48ee-9a31-869b82f4b6da" containerID="da43b095d64f2f1c07cefc5b26d30b5adc3601d7eb3c1a55e4c6212525e788e3" exitCode=0 Oct 02 11:26:48 crc kubenswrapper[4783]: I1002 11:26:48.156624 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-579nn" event={"ID":"064976fc-7c96-48ee-9a31-869b82f4b6da","Type":"ContainerDied","Data":"da43b095d64f2f1c07cefc5b26d30b5adc3601d7eb3c1a55e4c6212525e788e3"} Oct 02 11:26:49 crc kubenswrapper[4783]: I1002 11:26:49.558671 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-579nn" Oct 02 11:26:49 crc kubenswrapper[4783]: I1002 11:26:49.684249 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/064976fc-7c96-48ee-9a31-869b82f4b6da-ssh-key\") pod \"064976fc-7c96-48ee-9a31-869b82f4b6da\" (UID: \"064976fc-7c96-48ee-9a31-869b82f4b6da\") " Oct 02 11:26:49 crc kubenswrapper[4783]: I1002 11:26:49.684422 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fsgzr\" (UniqueName: \"kubernetes.io/projected/064976fc-7c96-48ee-9a31-869b82f4b6da-kube-api-access-fsgzr\") pod \"064976fc-7c96-48ee-9a31-869b82f4b6da\" (UID: \"064976fc-7c96-48ee-9a31-869b82f4b6da\") " Oct 02 11:26:49 crc kubenswrapper[4783]: I1002 11:26:49.684542 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/064976fc-7c96-48ee-9a31-869b82f4b6da-inventory\") pod \"064976fc-7c96-48ee-9a31-869b82f4b6da\" (UID: \"064976fc-7c96-48ee-9a31-869b82f4b6da\") " Oct 02 11:26:49 crc kubenswrapper[4783]: I1002 11:26:49.690556 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/064976fc-7c96-48ee-9a31-869b82f4b6da-kube-api-access-fsgzr" (OuterVolumeSpecName: "kube-api-access-fsgzr") pod "064976fc-7c96-48ee-9a31-869b82f4b6da" (UID: "064976fc-7c96-48ee-9a31-869b82f4b6da"). InnerVolumeSpecName "kube-api-access-fsgzr". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:26:49 crc kubenswrapper[4783]: I1002 11:26:49.712971 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/064976fc-7c96-48ee-9a31-869b82f4b6da-inventory" (OuterVolumeSpecName: "inventory") pod "064976fc-7c96-48ee-9a31-869b82f4b6da" (UID: "064976fc-7c96-48ee-9a31-869b82f4b6da"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:26:49 crc kubenswrapper[4783]: I1002 11:26:49.714975 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/064976fc-7c96-48ee-9a31-869b82f4b6da-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "064976fc-7c96-48ee-9a31-869b82f4b6da" (UID: "064976fc-7c96-48ee-9a31-869b82f4b6da"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:26:49 crc kubenswrapper[4783]: I1002 11:26:49.786565 4783 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/064976fc-7c96-48ee-9a31-869b82f4b6da-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 02 11:26:49 crc kubenswrapper[4783]: I1002 11:26:49.786601 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fsgzr\" (UniqueName: \"kubernetes.io/projected/064976fc-7c96-48ee-9a31-869b82f4b6da-kube-api-access-fsgzr\") on node \"crc\" DevicePath \"\"" Oct 02 11:26:49 crc kubenswrapper[4783]: I1002 11:26:49.786615 4783 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/064976fc-7c96-48ee-9a31-869b82f4b6da-inventory\") on node \"crc\" DevicePath \"\"" Oct 02 11:26:50 crc kubenswrapper[4783]: I1002 11:26:50.175953 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-579nn" event={"ID":"064976fc-7c96-48ee-9a31-869b82f4b6da","Type":"ContainerDied","Data":"ded8f3472bce9bcd4106b9a483c465c2d8813903240a8afbdb0235677f7a8661"} Oct 02 11:26:50 crc kubenswrapper[4783]: I1002 11:26:50.175989 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ded8f3472bce9bcd4106b9a483c465c2d8813903240a8afbdb0235677f7a8661" Oct 02 11:26:50 crc kubenswrapper[4783]: I1002 11:26:50.176051 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/download-cache-edpm-deployment-openstack-edpm-ipam-579nn" Oct 02 11:26:50 crc kubenswrapper[4783]: I1002 11:26:50.261139 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-nsfvn"] Oct 02 11:26:50 crc kubenswrapper[4783]: E1002 11:26:50.261792 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="064976fc-7c96-48ee-9a31-869b82f4b6da" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Oct 02 11:26:50 crc kubenswrapper[4783]: I1002 11:26:50.261874 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="064976fc-7c96-48ee-9a31-869b82f4b6da" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Oct 02 11:26:50 crc kubenswrapper[4783]: I1002 11:26:50.262105 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="064976fc-7c96-48ee-9a31-869b82f4b6da" containerName="download-cache-edpm-deployment-openstack-edpm-ipam" Oct 02 11:26:50 crc kubenswrapper[4783]: I1002 11:26:50.262806 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-nsfvn" Oct 02 11:26:50 crc kubenswrapper[4783]: I1002 11:26:50.267345 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 02 11:26:50 crc kubenswrapper[4783]: I1002 11:26:50.271003 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 02 11:26:50 crc kubenswrapper[4783]: I1002 11:26:50.271013 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-n5lmz" Oct 02 11:26:50 crc kubenswrapper[4783]: I1002 11:26:50.271086 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 02 11:26:50 crc kubenswrapper[4783]: I1002 11:26:50.274913 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-nsfvn"] Oct 02 11:26:50 crc kubenswrapper[4783]: I1002 11:26:50.397024 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0a226960-8105-46f9-b2dc-fc4347bec328-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-nsfvn\" (UID: \"0a226960-8105-46f9-b2dc-fc4347bec328\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-nsfvn" Oct 02 11:26:50 crc kubenswrapper[4783]: I1002 11:26:50.397652 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fcpmc\" (UniqueName: \"kubernetes.io/projected/0a226960-8105-46f9-b2dc-fc4347bec328-kube-api-access-fcpmc\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-nsfvn\" (UID: \"0a226960-8105-46f9-b2dc-fc4347bec328\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-nsfvn" Oct 02 11:26:50 crc kubenswrapper[4783]: I1002 11:26:50.397960 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0a226960-8105-46f9-b2dc-fc4347bec328-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-nsfvn\" (UID: \"0a226960-8105-46f9-b2dc-fc4347bec328\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-nsfvn" Oct 02 11:26:50 crc kubenswrapper[4783]: I1002 11:26:50.499834 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0a226960-8105-46f9-b2dc-fc4347bec328-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-nsfvn\" (UID: \"0a226960-8105-46f9-b2dc-fc4347bec328\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-nsfvn" Oct 02 11:26:50 crc kubenswrapper[4783]: I1002 11:26:50.499979 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0a226960-8105-46f9-b2dc-fc4347bec328-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-nsfvn\" (UID: \"0a226960-8105-46f9-b2dc-fc4347bec328\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-nsfvn" Oct 02 11:26:50 crc kubenswrapper[4783]: I1002 11:26:50.500010 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fcpmc\" (UniqueName: \"kubernetes.io/projected/0a226960-8105-46f9-b2dc-fc4347bec328-kube-api-access-fcpmc\") 
pod \"configure-network-edpm-deployment-openstack-edpm-ipam-nsfvn\" (UID: \"0a226960-8105-46f9-b2dc-fc4347bec328\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-nsfvn" Oct 02 11:26:50 crc kubenswrapper[4783]: I1002 11:26:50.505337 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0a226960-8105-46f9-b2dc-fc4347bec328-ssh-key\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-nsfvn\" (UID: \"0a226960-8105-46f9-b2dc-fc4347bec328\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-nsfvn" Oct 02 11:26:50 crc kubenswrapper[4783]: I1002 11:26:50.510339 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0a226960-8105-46f9-b2dc-fc4347bec328-inventory\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-nsfvn\" (UID: \"0a226960-8105-46f9-b2dc-fc4347bec328\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-nsfvn" Oct 02 11:26:50 crc kubenswrapper[4783]: I1002 11:26:50.519007 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fcpmc\" (UniqueName: \"kubernetes.io/projected/0a226960-8105-46f9-b2dc-fc4347bec328-kube-api-access-fcpmc\") pod \"configure-network-edpm-deployment-openstack-edpm-ipam-nsfvn\" (UID: \"0a226960-8105-46f9-b2dc-fc4347bec328\") " pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-nsfvn" Oct 02 11:26:50 crc kubenswrapper[4783]: I1002 11:26:50.590646 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-nsfvn" Oct 02 11:26:51 crc kubenswrapper[4783]: I1002 11:26:51.138541 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-network-edpm-deployment-openstack-edpm-ipam-nsfvn"] Oct 02 11:26:51 crc kubenswrapper[4783]: W1002 11:26:51.141717 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0a226960_8105_46f9_b2dc_fc4347bec328.slice/crio-72f363349c1d612212f08177b3c3b85b8f228745a5d07d30e5cc511bebb72aa0 WatchSource:0}: Error finding container 72f363349c1d612212f08177b3c3b85b8f228745a5d07d30e5cc511bebb72aa0: Status 404 returned error can't find the container with id 72f363349c1d612212f08177b3c3b85b8f228745a5d07d30e5cc511bebb72aa0 Oct 02 11:26:51 crc kubenswrapper[4783]: I1002 11:26:51.145341 4783 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 02 11:26:51 crc kubenswrapper[4783]: I1002 11:26:51.191775 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-nsfvn" event={"ID":"0a226960-8105-46f9-b2dc-fc4347bec328","Type":"ContainerStarted","Data":"72f363349c1d612212f08177b3c3b85b8f228745a5d07d30e5cc511bebb72aa0"} Oct 02 11:26:51 crc kubenswrapper[4783]: I1002 11:26:51.513974 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 02 11:26:51 crc kubenswrapper[4783]: I1002 11:26:51.514508 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" 
containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 02 11:26:52 crc kubenswrapper[4783]: I1002 11:26:52.201917 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-nsfvn" event={"ID":"0a226960-8105-46f9-b2dc-fc4347bec328","Type":"ContainerStarted","Data":"692069f53fd56f8ad48722cb6c6886220fd32c749a4460584c8da32df3b49b71"} Oct 02 11:26:52 crc kubenswrapper[4783]: I1002 11:26:52.229730 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-nsfvn" podStartSLOduration=1.942037525 podStartE2EDuration="2.229709486s" podCreationTimestamp="2025-10-02 11:26:50 +0000 UTC" firstStartedPulling="2025-10-02 11:26:51.144811829 +0000 UTC m=+2044.461006090" lastFinishedPulling="2025-10-02 11:26:51.43248379 +0000 UTC m=+2044.748678051" observedRunningTime="2025-10-02 11:26:52.227728314 +0000 UTC m=+2045.543922595" watchObservedRunningTime="2025-10-02 11:26:52.229709486 +0000 UTC m=+2045.545903757" Oct 02 11:26:59 crc kubenswrapper[4783]: I1002 11:26:59.039793 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-cm7cm"] Oct 02 11:26:59 crc kubenswrapper[4783]: I1002 11:26:59.051484 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-v6ktl"] Oct 02 11:26:59 crc kubenswrapper[4783]: I1002 11:26:59.059277 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-vtkp6"] Oct 02 11:26:59 crc kubenswrapper[4783]: I1002 11:26:59.066354 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-vtkp6"] Oct 02 11:26:59 crc kubenswrapper[4783]: I1002 11:26:59.073450 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-v6ktl"] Oct 02 11:26:59 crc kubenswrapper[4783]: I1002 11:26:59.080548 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-cm7cm"] Oct 02 11:26:59 crc kubenswrapper[4783]: I1002 11:26:59.557449 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="46f71a3d-452c-4ab3-a0e6-b3f318ab2cea" path="/var/lib/kubelet/pods/46f71a3d-452c-4ab3-a0e6-b3f318ab2cea/volumes" Oct 02 11:26:59 crc kubenswrapper[4783]: I1002 11:26:59.558078 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b7d8ce26-e566-4c5b-86be-55096f9346f1" path="/var/lib/kubelet/pods/b7d8ce26-e566-4c5b-86be-55096f9346f1/volumes" Oct 02 11:26:59 crc kubenswrapper[4783]: I1002 11:26:59.558729 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ba16e941-c6c8-42a9-86c8-49aa0af48a36" path="/var/lib/kubelet/pods/ba16e941-c6c8-42a9-86c8-49aa0af48a36/volumes" Oct 02 11:27:09 crc kubenswrapper[4783]: I1002 11:27:09.042177 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-c6a7-account-create-8sr5h"] Oct 02 11:27:09 crc kubenswrapper[4783]: I1002 11:27:09.051611 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-26f6-account-create-hst2q"] Oct 02 11:27:09 crc kubenswrapper[4783]: I1002 11:27:09.061568 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-7ce3-account-create-ng7bk"] Oct 02 11:27:09 crc kubenswrapper[4783]: I1002 11:27:09.074868 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/nova-cell1-26f6-account-create-hst2q"] Oct 02 11:27:09 crc kubenswrapper[4783]: I1002 11:27:09.082978 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-7ce3-account-create-ng7bk"] Oct 02 11:27:09 crc kubenswrapper[4783]: I1002 11:27:09.089355 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-c6a7-account-create-8sr5h"] Oct 02 11:27:09 crc kubenswrapper[4783]: I1002 11:27:09.559845 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="154da498-2c29-4ee5-ba31-850329d73177" path="/var/lib/kubelet/pods/154da498-2c29-4ee5-ba31-850329d73177/volumes" Oct 02 11:27:09 crc kubenswrapper[4783]: I1002 11:27:09.560486 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="999c77ea-3263-4172-b950-3281d5f9034e" path="/var/lib/kubelet/pods/999c77ea-3263-4172-b950-3281d5f9034e/volumes" Oct 02 11:27:09 crc kubenswrapper[4783]: I1002 11:27:09.561073 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dec7f801-786b-472a-8e75-a4db5d89f15c" path="/var/lib/kubelet/pods/dec7f801-786b-472a-8e75-a4db5d89f15c/volumes" Oct 02 11:27:21 crc kubenswrapper[4783]: I1002 11:27:21.513358 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 02 11:27:21 crc kubenswrapper[4783]: I1002 11:27:21.514514 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 02 11:27:21 crc kubenswrapper[4783]: I1002 11:27:21.514606 4783 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" Oct 02 11:27:21 crc kubenswrapper[4783]: I1002 11:27:21.516058 4783 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4273188ae108ec0f85de3e8dc29d684f9057cc5d4a801982cba2234f8872c085"} pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 02 11:27:21 crc kubenswrapper[4783]: I1002 11:27:21.516278 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" containerID="cri-o://4273188ae108ec0f85de3e8dc29d684f9057cc5d4a801982cba2234f8872c085" gracePeriod=600 Oct 02 11:27:22 crc kubenswrapper[4783]: I1002 11:27:22.490555 4783 generic.go:334] "Generic (PLEG): container finished" podID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerID="4273188ae108ec0f85de3e8dc29d684f9057cc5d4a801982cba2234f8872c085" exitCode=0 Oct 02 11:27:22 crc kubenswrapper[4783]: I1002 11:27:22.490635 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" event={"ID":"3288cc82-59a8-408e-8b0e-b5255882b4fb","Type":"ContainerDied","Data":"4273188ae108ec0f85de3e8dc29d684f9057cc5d4a801982cba2234f8872c085"} Oct 02 11:27:22 crc 
Oct 02 11:27:22 crc kubenswrapper[4783]: I1002 11:27:22.490902 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" event={"ID":"3288cc82-59a8-408e-8b0e-b5255882b4fb","Type":"ContainerStarted","Data":"5f41be414b60ced507542f945d0dbbae4870dae2d8ee2c71db9491f9f0b17cfa"}
Oct 02 11:27:22 crc kubenswrapper[4783]: I1002 11:27:22.490926 4783 scope.go:117] "RemoveContainer" containerID="a4f4b50638a10c4b177b4afe187ab7d697078645bdbd553938b22e713769fb04"
Oct 02 11:27:49 crc kubenswrapper[4783]: I1002 11:27:49.040711 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-tlclg"]
Oct 02 11:27:49 crc kubenswrapper[4783]: I1002 11:27:49.054457 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-tlclg"]
Oct 02 11:27:49 crc kubenswrapper[4783]: I1002 11:27:49.559732 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fba98d7a-3a8d-43dd-b494-a7769328b96e" path="/var/lib/kubelet/pods/fba98d7a-3a8d-43dd-b494-a7769328b96e/volumes"
Oct 02 11:27:55 crc kubenswrapper[4783]: I1002 11:27:55.021979 4783 scope.go:117] "RemoveContainer" containerID="7eec719596d0f4fcf8513c04b7b2b884a13e424d80fca34e1425e6395ab14877"
Oct 02 11:27:55 crc kubenswrapper[4783]: I1002 11:27:55.056634 4783 scope.go:117] "RemoveContainer" containerID="6a69d0ff96c3ed92e6fd8bafacb676d19d154b41bacc1d1dffa8937ddda457c0"
Oct 02 11:27:55 crc kubenswrapper[4783]: I1002 11:27:55.122393 4783 scope.go:117] "RemoveContainer" containerID="1f96f2c30875af924334afcd548cc8c070cb1a14de538bb3f8ecc81fc2682e47"
Oct 02 11:27:55 crc kubenswrapper[4783]: I1002 11:27:55.221706 4783 scope.go:117] "RemoveContainer" containerID="b17948d991ca5496c7138e40dae05320a691e18c619214bd060780abf39dec80"
Oct 02 11:27:55 crc kubenswrapper[4783]: I1002 11:27:55.319719 4783 scope.go:117] "RemoveContainer" containerID="b6f802f6c9ce0a6f4eac792d704235d8eb80cddd292eead7a77e161b5a437961"
Oct 02 11:27:55 crc kubenswrapper[4783]: I1002 11:27:55.355043 4783 scope.go:117] "RemoveContainer" containerID="62ae0881e9c33a7d3d13a85515153660452d1392dbc6513dc22a6366f2183d58"
Oct 02 11:27:55 crc kubenswrapper[4783]: I1002 11:27:55.400504 4783 scope.go:117] "RemoveContainer" containerID="3250caf9baa85febd54656b6bb83aff29d6e1374aaf7417fe3b9711ee8687cce"
Oct 02 11:28:04 crc kubenswrapper[4783]: I1002 11:28:04.402904 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-kh9hj"]
Oct 02 11:28:04 crc kubenswrapper[4783]: I1002 11:28:04.409295 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kh9hj"
Oct 02 11:28:04 crc kubenswrapper[4783]: I1002 11:28:04.411543 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-kh9hj"]
Oct 02 11:28:04 crc kubenswrapper[4783]: I1002 11:28:04.485158 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/111fb2ae-08ce-4366-aeda-7ec4f5d5432c-catalog-content\") pod \"certified-operators-kh9hj\" (UID: \"111fb2ae-08ce-4366-aeda-7ec4f5d5432c\") " pod="openshift-marketplace/certified-operators-kh9hj"
Oct 02 11:28:04 crc kubenswrapper[4783]: I1002 11:28:04.485235 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/111fb2ae-08ce-4366-aeda-7ec4f5d5432c-utilities\") pod \"certified-operators-kh9hj\" (UID: \"111fb2ae-08ce-4366-aeda-7ec4f5d5432c\") " pod="openshift-marketplace/certified-operators-kh9hj"
Oct 02 11:28:04 crc kubenswrapper[4783]: I1002 11:28:04.485316 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tb688\" (UniqueName: \"kubernetes.io/projected/111fb2ae-08ce-4366-aeda-7ec4f5d5432c-kube-api-access-tb688\") pod \"certified-operators-kh9hj\" (UID: \"111fb2ae-08ce-4366-aeda-7ec4f5d5432c\") " pod="openshift-marketplace/certified-operators-kh9hj"
Oct 02 11:28:04 crc kubenswrapper[4783]: I1002 11:28:04.587152 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tb688\" (UniqueName: \"kubernetes.io/projected/111fb2ae-08ce-4366-aeda-7ec4f5d5432c-kube-api-access-tb688\") pod \"certified-operators-kh9hj\" (UID: \"111fb2ae-08ce-4366-aeda-7ec4f5d5432c\") " pod="openshift-marketplace/certified-operators-kh9hj"
Oct 02 11:28:04 crc kubenswrapper[4783]: I1002 11:28:04.588506 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/111fb2ae-08ce-4366-aeda-7ec4f5d5432c-catalog-content\") pod \"certified-operators-kh9hj\" (UID: \"111fb2ae-08ce-4366-aeda-7ec4f5d5432c\") " pod="openshift-marketplace/certified-operators-kh9hj"
Oct 02 11:28:04 crc kubenswrapper[4783]: I1002 11:28:04.588930 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/111fb2ae-08ce-4366-aeda-7ec4f5d5432c-utilities\") pod \"certified-operators-kh9hj\" (UID: \"111fb2ae-08ce-4366-aeda-7ec4f5d5432c\") " pod="openshift-marketplace/certified-operators-kh9hj"
Oct 02 11:28:04 crc kubenswrapper[4783]: I1002 11:28:04.588858 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/111fb2ae-08ce-4366-aeda-7ec4f5d5432c-catalog-content\") pod \"certified-operators-kh9hj\" (UID: \"111fb2ae-08ce-4366-aeda-7ec4f5d5432c\") " pod="openshift-marketplace/certified-operators-kh9hj"
Oct 02 11:28:04 crc kubenswrapper[4783]: I1002 11:28:04.589151 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/111fb2ae-08ce-4366-aeda-7ec4f5d5432c-utilities\") pod \"certified-operators-kh9hj\" (UID: \"111fb2ae-08ce-4366-aeda-7ec4f5d5432c\") " pod="openshift-marketplace/certified-operators-kh9hj"
Oct 02 11:28:04 crc kubenswrapper[4783]: I1002 11:28:04.618547 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tb688\" (UniqueName: \"kubernetes.io/projected/111fb2ae-08ce-4366-aeda-7ec4f5d5432c-kube-api-access-tb688\") pod \"certified-operators-kh9hj\" (UID: \"111fb2ae-08ce-4366-aeda-7ec4f5d5432c\") " pod="openshift-marketplace/certified-operators-kh9hj"
Oct 02 11:28:04 crc kubenswrapper[4783]: I1002 11:28:04.748209 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kh9hj"
Oct 02 11:28:05 crc kubenswrapper[4783]: I1002 11:28:05.168490 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-kh9hj"]
Oct 02 11:28:05 crc kubenswrapper[4783]: I1002 11:28:05.871822 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kh9hj" event={"ID":"111fb2ae-08ce-4366-aeda-7ec4f5d5432c","Type":"ContainerStarted","Data":"d3cb78ebe344a4707a87533ba19afe1817ec86e4dc098c01657f2418483ec754"}
Oct 02 11:28:06 crc kubenswrapper[4783]: I1002 11:28:06.883845 4783 generic.go:334] "Generic (PLEG): container finished" podID="111fb2ae-08ce-4366-aeda-7ec4f5d5432c" containerID="5fadb5fcbc9fb78d4cd0e82063ce57af0a6e3e1282b9508ad7349bb08286e71b" exitCode=0
Oct 02 11:28:06 crc kubenswrapper[4783]: I1002 11:28:06.883951 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kh9hj" event={"ID":"111fb2ae-08ce-4366-aeda-7ec4f5d5432c","Type":"ContainerDied","Data":"5fadb5fcbc9fb78d4cd0e82063ce57af0a6e3e1282b9508ad7349bb08286e71b"}
Oct 02 11:28:09 crc kubenswrapper[4783]: I1002 11:28:09.916463 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kh9hj" event={"ID":"111fb2ae-08ce-4366-aeda-7ec4f5d5432c","Type":"ContainerStarted","Data":"7716bede7ea4626535f6ac56d2154b96fa9d9c8d0d579274d8b4f0230d4fccd6"}
Oct 02 11:28:12 crc kubenswrapper[4783]: I1002 11:28:12.953076 4783 generic.go:334] "Generic (PLEG): container finished" podID="111fb2ae-08ce-4366-aeda-7ec4f5d5432c" containerID="7716bede7ea4626535f6ac56d2154b96fa9d9c8d0d579274d8b4f0230d4fccd6" exitCode=0
Oct 02 11:28:12 crc kubenswrapper[4783]: I1002 11:28:12.953179 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kh9hj" event={"ID":"111fb2ae-08ce-4366-aeda-7ec4f5d5432c","Type":"ContainerDied","Data":"7716bede7ea4626535f6ac56d2154b96fa9d9c8d0d579274d8b4f0230d4fccd6"}
Oct 02 11:28:13 crc kubenswrapper[4783]: I1002 11:28:13.965567 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kh9hj" event={"ID":"111fb2ae-08ce-4366-aeda-7ec4f5d5432c","Type":"ContainerStarted","Data":"9970f40951f02134ce402c664787515cfb1789b4bee55131c8a4dc891c0edacc"}
Oct 02 11:28:13 crc kubenswrapper[4783]: I1002 11:28:13.969125 4783 generic.go:334] "Generic (PLEG): container finished" podID="0a226960-8105-46f9-b2dc-fc4347bec328" containerID="692069f53fd56f8ad48722cb6c6886220fd32c749a4460584c8da32df3b49b71" exitCode=0
Oct 02 11:28:13 crc kubenswrapper[4783]: I1002 11:28:13.969167 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-nsfvn" event={"ID":"0a226960-8105-46f9-b2dc-fc4347bec328","Type":"ContainerDied","Data":"692069f53fd56f8ad48722cb6c6886220fd32c749a4460584c8da32df3b49b71"}
Oct 02 11:28:13 crc kubenswrapper[4783]: I1002 11:28:13.992396 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-kh9hj" podStartSLOduration=3.497033459 podStartE2EDuration="9.992378086s" podCreationTimestamp="2025-10-02 11:28:04 +0000 UTC" firstStartedPulling="2025-10-02 11:28:06.886142499 +0000 UTC m=+2120.202336770" lastFinishedPulling="2025-10-02 11:28:13.381487136 +0000 UTC m=+2126.697681397" observedRunningTime="2025-10-02 11:28:13.98724994 +0000 UTC m=+2127.303444201" watchObservedRunningTime="2025-10-02 11:28:13.992378086 +0000 UTC m=+2127.308572337"
Oct 02 11:28:14 crc kubenswrapper[4783]: I1002 11:28:14.748734 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-kh9hj"
Oct 02 11:28:14 crc kubenswrapper[4783]: I1002 11:28:14.748782 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-kh9hj"
Oct 02 11:28:15 crc kubenswrapper[4783]: I1002 11:28:15.360327 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-nsfvn"
Oct 02 11:28:15 crc kubenswrapper[4783]: I1002 11:28:15.422546 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0a226960-8105-46f9-b2dc-fc4347bec328-ssh-key\") pod \"0a226960-8105-46f9-b2dc-fc4347bec328\" (UID: \"0a226960-8105-46f9-b2dc-fc4347bec328\") "
Oct 02 11:28:15 crc kubenswrapper[4783]: I1002 11:28:15.422711 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcpmc\" (UniqueName: \"kubernetes.io/projected/0a226960-8105-46f9-b2dc-fc4347bec328-kube-api-access-fcpmc\") pod \"0a226960-8105-46f9-b2dc-fc4347bec328\" (UID: \"0a226960-8105-46f9-b2dc-fc4347bec328\") "
Oct 02 11:28:15 crc kubenswrapper[4783]: I1002 11:28:15.422938 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0a226960-8105-46f9-b2dc-fc4347bec328-inventory\") pod \"0a226960-8105-46f9-b2dc-fc4347bec328\" (UID: \"0a226960-8105-46f9-b2dc-fc4347bec328\") "
Oct 02 11:28:15 crc kubenswrapper[4783]: I1002 11:28:15.434602 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a226960-8105-46f9-b2dc-fc4347bec328-kube-api-access-fcpmc" (OuterVolumeSpecName: "kube-api-access-fcpmc") pod "0a226960-8105-46f9-b2dc-fc4347bec328" (UID: "0a226960-8105-46f9-b2dc-fc4347bec328"). InnerVolumeSpecName "kube-api-access-fcpmc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 02 11:28:15 crc kubenswrapper[4783]: I1002 11:28:15.462358 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a226960-8105-46f9-b2dc-fc4347bec328-inventory" (OuterVolumeSpecName: "inventory") pod "0a226960-8105-46f9-b2dc-fc4347bec328" (UID: "0a226960-8105-46f9-b2dc-fc4347bec328"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:28:15 crc kubenswrapper[4783]: I1002 11:28:15.463746 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a226960-8105-46f9-b2dc-fc4347bec328-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "0a226960-8105-46f9-b2dc-fc4347bec328" (UID: "0a226960-8105-46f9-b2dc-fc4347bec328"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:28:15 crc kubenswrapper[4783]: I1002 11:28:15.526593 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcpmc\" (UniqueName: \"kubernetes.io/projected/0a226960-8105-46f9-b2dc-fc4347bec328-kube-api-access-fcpmc\") on node \"crc\" DevicePath \"\""
Oct 02 11:28:15 crc kubenswrapper[4783]: I1002 11:28:15.526822 4783 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0a226960-8105-46f9-b2dc-fc4347bec328-inventory\") on node \"crc\" DevicePath \"\""
Oct 02 11:28:15 crc kubenswrapper[4783]: I1002 11:28:15.526833 4783 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0a226960-8105-46f9-b2dc-fc4347bec328-ssh-key\") on node \"crc\" DevicePath \"\""
Oct 02 11:28:15 crc kubenswrapper[4783]: I1002 11:28:15.802466 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-kh9hj" podUID="111fb2ae-08ce-4366-aeda-7ec4f5d5432c" containerName="registry-server" probeResult="failure" output=<
Oct 02 11:28:15 crc kubenswrapper[4783]: timeout: failed to connect service ":50051" within 1s
Oct 02 11:28:15 crc kubenswrapper[4783]: >
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:28:15 crc kubenswrapper[4783]: I1002 11:28:15.526593 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcpmc\" (UniqueName: \"kubernetes.io/projected/0a226960-8105-46f9-b2dc-fc4347bec328-kube-api-access-fcpmc\") on node \"crc\" DevicePath \"\"" Oct 02 11:28:15 crc kubenswrapper[4783]: I1002 11:28:15.526822 4783 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/0a226960-8105-46f9-b2dc-fc4347bec328-inventory\") on node \"crc\" DevicePath \"\"" Oct 02 11:28:15 crc kubenswrapper[4783]: I1002 11:28:15.526833 4783 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/0a226960-8105-46f9-b2dc-fc4347bec328-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 02 11:28:15 crc kubenswrapper[4783]: I1002 11:28:15.802466 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-kh9hj" podUID="111fb2ae-08ce-4366-aeda-7ec4f5d5432c" containerName="registry-server" probeResult="failure" output=< Oct 02 11:28:15 crc kubenswrapper[4783]: timeout: failed to connect service ":50051" within 1s Oct 02 11:28:15 crc kubenswrapper[4783]: > Oct 02 11:28:15 crc kubenswrapper[4783]: I1002 11:28:15.985680 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-nsfvn" event={"ID":"0a226960-8105-46f9-b2dc-fc4347bec328","Type":"ContainerDied","Data":"72f363349c1d612212f08177b3c3b85b8f228745a5d07d30e5cc511bebb72aa0"} Oct 02 11:28:15 crc kubenswrapper[4783]: I1002 11:28:15.985728 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="72f363349c1d612212f08177b3c3b85b8f228745a5d07d30e5cc511bebb72aa0" Oct 02 11:28:15 crc kubenswrapper[4783]: I1002 11:28:15.985729 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-network-edpm-deployment-openstack-edpm-ipam-nsfvn" Oct 02 11:28:16 crc kubenswrapper[4783]: I1002 11:28:16.093911 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bmfl5"] Oct 02 11:28:16 crc kubenswrapper[4783]: E1002 11:28:16.094320 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a226960-8105-46f9-b2dc-fc4347bec328" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Oct 02 11:28:16 crc kubenswrapper[4783]: I1002 11:28:16.094339 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a226960-8105-46f9-b2dc-fc4347bec328" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Oct 02 11:28:16 crc kubenswrapper[4783]: I1002 11:28:16.094562 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a226960-8105-46f9-b2dc-fc4347bec328" containerName="configure-network-edpm-deployment-openstack-edpm-ipam" Oct 02 11:28:16 crc kubenswrapper[4783]: I1002 11:28:16.095337 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bmfl5" Oct 02 11:28:16 crc kubenswrapper[4783]: I1002 11:28:16.098698 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 02 11:28:16 crc kubenswrapper[4783]: I1002 11:28:16.098779 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 02 11:28:16 crc kubenswrapper[4783]: I1002 11:28:16.099210 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 02 11:28:16 crc kubenswrapper[4783]: I1002 11:28:16.099480 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-n5lmz" Oct 02 11:28:16 crc kubenswrapper[4783]: I1002 11:28:16.104153 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bmfl5"] Oct 02 11:28:16 crc kubenswrapper[4783]: I1002 11:28:16.139456 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-28hzq\" (UniqueName: \"kubernetes.io/projected/1b27c20b-97ac-477e-99c6-075d9f56c078-kube-api-access-28hzq\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-bmfl5\" (UID: \"1b27c20b-97ac-477e-99c6-075d9f56c078\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bmfl5" Oct 02 11:28:16 crc kubenswrapper[4783]: I1002 11:28:16.139750 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1b27c20b-97ac-477e-99c6-075d9f56c078-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-bmfl5\" (UID: \"1b27c20b-97ac-477e-99c6-075d9f56c078\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bmfl5" Oct 02 11:28:16 crc kubenswrapper[4783]: I1002 11:28:16.139841 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1b27c20b-97ac-477e-99c6-075d9f56c078-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-bmfl5\" (UID: \"1b27c20b-97ac-477e-99c6-075d9f56c078\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bmfl5" Oct 02 11:28:16 crc kubenswrapper[4783]: I1002 11:28:16.241525 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-28hzq\" (UniqueName: \"kubernetes.io/projected/1b27c20b-97ac-477e-99c6-075d9f56c078-kube-api-access-28hzq\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-bmfl5\" (UID: \"1b27c20b-97ac-477e-99c6-075d9f56c078\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bmfl5" Oct 02 11:28:16 crc kubenswrapper[4783]: I1002 11:28:16.241649 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1b27c20b-97ac-477e-99c6-075d9f56c078-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-bmfl5\" (UID: \"1b27c20b-97ac-477e-99c6-075d9f56c078\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bmfl5" Oct 02 11:28:16 crc kubenswrapper[4783]: I1002 11:28:16.241726 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1b27c20b-97ac-477e-99c6-075d9f56c078-ssh-key\") pod 
\"validate-network-edpm-deployment-openstack-edpm-ipam-bmfl5\" (UID: \"1b27c20b-97ac-477e-99c6-075d9f56c078\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bmfl5" Oct 02 11:28:16 crc kubenswrapper[4783]: I1002 11:28:16.247044 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1b27c20b-97ac-477e-99c6-075d9f56c078-ssh-key\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-bmfl5\" (UID: \"1b27c20b-97ac-477e-99c6-075d9f56c078\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bmfl5" Oct 02 11:28:16 crc kubenswrapper[4783]: I1002 11:28:16.247436 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1b27c20b-97ac-477e-99c6-075d9f56c078-inventory\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-bmfl5\" (UID: \"1b27c20b-97ac-477e-99c6-075d9f56c078\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bmfl5" Oct 02 11:28:16 crc kubenswrapper[4783]: I1002 11:28:16.258344 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-28hzq\" (UniqueName: \"kubernetes.io/projected/1b27c20b-97ac-477e-99c6-075d9f56c078-kube-api-access-28hzq\") pod \"validate-network-edpm-deployment-openstack-edpm-ipam-bmfl5\" (UID: \"1b27c20b-97ac-477e-99c6-075d9f56c078\") " pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bmfl5" Oct 02 11:28:16 crc kubenswrapper[4783]: I1002 11:28:16.425591 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bmfl5" Oct 02 11:28:16 crc kubenswrapper[4783]: I1002 11:28:16.951890 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bmfl5"] Oct 02 11:28:16 crc kubenswrapper[4783]: I1002 11:28:16.994818 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bmfl5" event={"ID":"1b27c20b-97ac-477e-99c6-075d9f56c078","Type":"ContainerStarted","Data":"b58ffc3a1129906a43d975ead3126e91843443e1816017abfe890a2434abe08c"} Oct 02 11:28:19 crc kubenswrapper[4783]: I1002 11:28:19.011028 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bmfl5" event={"ID":"1b27c20b-97ac-477e-99c6-075d9f56c078","Type":"ContainerStarted","Data":"c2b4d3b3ce5404a65c84162a3e4e5233b63f79585964592201b19e69d5b5ac5b"} Oct 02 11:28:19 crc kubenswrapper[4783]: I1002 11:28:19.045146 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bmfl5" podStartSLOduration=1.522551969 podStartE2EDuration="3.045130515s" podCreationTimestamp="2025-10-02 11:28:16 +0000 UTC" firstStartedPulling="2025-10-02 11:28:16.957504798 +0000 UTC m=+2130.273699059" lastFinishedPulling="2025-10-02 11:28:18.480083344 +0000 UTC m=+2131.796277605" observedRunningTime="2025-10-02 11:28:19.041234032 +0000 UTC m=+2132.357428293" watchObservedRunningTime="2025-10-02 11:28:19.045130515 +0000 UTC m=+2132.361324776" Oct 02 11:28:19 crc kubenswrapper[4783]: I1002 11:28:19.063511 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-m7lqg"] Oct 02 11:28:19 crc kubenswrapper[4783]: I1002 11:28:19.072810 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openstack/nova-cell0-cell-mapping-m7lqg"] Oct 02 11:28:19 crc kubenswrapper[4783]: I1002 11:28:19.559997 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a1d4a6fe-7b83-4b7a-8b8f-93006697d03a" path="/var/lib/kubelet/pods/a1d4a6fe-7b83-4b7a-8b8f-93006697d03a/volumes" Oct 02 11:28:20 crc kubenswrapper[4783]: I1002 11:28:20.028648 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-28q9h"] Oct 02 11:28:20 crc kubenswrapper[4783]: I1002 11:28:20.035243 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-28q9h"] Oct 02 11:28:21 crc kubenswrapper[4783]: I1002 11:28:21.561439 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="757f9906-d7ab-4ae7-a2f2-5b83bf48b820" path="/var/lib/kubelet/pods/757f9906-d7ab-4ae7-a2f2-5b83bf48b820/volumes" Oct 02 11:28:24 crc kubenswrapper[4783]: I1002 11:28:24.794426 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-kh9hj" Oct 02 11:28:24 crc kubenswrapper[4783]: I1002 11:28:24.863036 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-kh9hj" Oct 02 11:28:25 crc kubenswrapper[4783]: I1002 11:28:25.034917 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-kh9hj"] Oct 02 11:28:26 crc kubenswrapper[4783]: I1002 11:28:26.075305 4783 generic.go:334] "Generic (PLEG): container finished" podID="1b27c20b-97ac-477e-99c6-075d9f56c078" containerID="c2b4d3b3ce5404a65c84162a3e4e5233b63f79585964592201b19e69d5b5ac5b" exitCode=0 Oct 02 11:28:26 crc kubenswrapper[4783]: I1002 11:28:26.076001 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-kh9hj" podUID="111fb2ae-08ce-4366-aeda-7ec4f5d5432c" containerName="registry-server" containerID="cri-o://9970f40951f02134ce402c664787515cfb1789b4bee55131c8a4dc891c0edacc" gracePeriod=2 Oct 02 11:28:26 crc kubenswrapper[4783]: I1002 11:28:26.076111 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bmfl5" event={"ID":"1b27c20b-97ac-477e-99c6-075d9f56c078","Type":"ContainerDied","Data":"c2b4d3b3ce5404a65c84162a3e4e5233b63f79585964592201b19e69d5b5ac5b"} Oct 02 11:28:26 crc kubenswrapper[4783]: E1002 11:28:26.359248 4783 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod111fb2ae_08ce_4366_aeda_7ec4f5d5432c.slice/crio-9970f40951f02134ce402c664787515cfb1789b4bee55131c8a4dc891c0edacc.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod111fb2ae_08ce_4366_aeda_7ec4f5d5432c.slice/crio-conmon-9970f40951f02134ce402c664787515cfb1789b4bee55131c8a4dc891c0edacc.scope\": RecentStats: unable to find data in memory cache]" Oct 02 11:28:27 crc kubenswrapper[4783]: I1002 11:28:27.038318 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-kh9hj" Oct 02 11:28:27 crc kubenswrapper[4783]: I1002 11:28:27.086462 4783 generic.go:334] "Generic (PLEG): container finished" podID="111fb2ae-08ce-4366-aeda-7ec4f5d5432c" containerID="9970f40951f02134ce402c664787515cfb1789b4bee55131c8a4dc891c0edacc" exitCode=0 Oct 02 11:28:27 crc kubenswrapper[4783]: I1002 11:28:27.086661 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kh9hj" Oct 02 11:28:27 crc kubenswrapper[4783]: I1002 11:28:27.087842 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kh9hj" event={"ID":"111fb2ae-08ce-4366-aeda-7ec4f5d5432c","Type":"ContainerDied","Data":"9970f40951f02134ce402c664787515cfb1789b4bee55131c8a4dc891c0edacc"} Oct 02 11:28:27 crc kubenswrapper[4783]: I1002 11:28:27.087919 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kh9hj" event={"ID":"111fb2ae-08ce-4366-aeda-7ec4f5d5432c","Type":"ContainerDied","Data":"d3cb78ebe344a4707a87533ba19afe1817ec86e4dc098c01657f2418483ec754"} Oct 02 11:28:27 crc kubenswrapper[4783]: I1002 11:28:27.087962 4783 scope.go:117] "RemoveContainer" containerID="9970f40951f02134ce402c664787515cfb1789b4bee55131c8a4dc891c0edacc" Oct 02 11:28:27 crc kubenswrapper[4783]: I1002 11:28:27.115342 4783 scope.go:117] "RemoveContainer" containerID="7716bede7ea4626535f6ac56d2154b96fa9d9c8d0d579274d8b4f0230d4fccd6" Oct 02 11:28:27 crc kubenswrapper[4783]: I1002 11:28:27.136250 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/111fb2ae-08ce-4366-aeda-7ec4f5d5432c-utilities\") pod \"111fb2ae-08ce-4366-aeda-7ec4f5d5432c\" (UID: \"111fb2ae-08ce-4366-aeda-7ec4f5d5432c\") " Oct 02 11:28:27 crc kubenswrapper[4783]: I1002 11:28:27.136303 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/111fb2ae-08ce-4366-aeda-7ec4f5d5432c-catalog-content\") pod \"111fb2ae-08ce-4366-aeda-7ec4f5d5432c\" (UID: \"111fb2ae-08ce-4366-aeda-7ec4f5d5432c\") " Oct 02 11:28:27 crc kubenswrapper[4783]: I1002 11:28:27.136460 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tb688\" (UniqueName: \"kubernetes.io/projected/111fb2ae-08ce-4366-aeda-7ec4f5d5432c-kube-api-access-tb688\") pod \"111fb2ae-08ce-4366-aeda-7ec4f5d5432c\" (UID: \"111fb2ae-08ce-4366-aeda-7ec4f5d5432c\") " Oct 02 11:28:27 crc kubenswrapper[4783]: I1002 11:28:27.138502 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/111fb2ae-08ce-4366-aeda-7ec4f5d5432c-utilities" (OuterVolumeSpecName: "utilities") pod "111fb2ae-08ce-4366-aeda-7ec4f5d5432c" (UID: "111fb2ae-08ce-4366-aeda-7ec4f5d5432c"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:28:27 crc kubenswrapper[4783]: I1002 11:28:27.147217 4783 scope.go:117] "RemoveContainer" containerID="5fadb5fcbc9fb78d4cd0e82063ce57af0a6e3e1282b9508ad7349bb08286e71b" Oct 02 11:28:27 crc kubenswrapper[4783]: I1002 11:28:27.149535 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/111fb2ae-08ce-4366-aeda-7ec4f5d5432c-kube-api-access-tb688" (OuterVolumeSpecName: "kube-api-access-tb688") pod "111fb2ae-08ce-4366-aeda-7ec4f5d5432c" (UID: "111fb2ae-08ce-4366-aeda-7ec4f5d5432c"). InnerVolumeSpecName "kube-api-access-tb688". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:28:27 crc kubenswrapper[4783]: I1002 11:28:27.191724 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/111fb2ae-08ce-4366-aeda-7ec4f5d5432c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "111fb2ae-08ce-4366-aeda-7ec4f5d5432c" (UID: "111fb2ae-08ce-4366-aeda-7ec4f5d5432c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:28:27 crc kubenswrapper[4783]: I1002 11:28:27.226853 4783 scope.go:117] "RemoveContainer" containerID="9970f40951f02134ce402c664787515cfb1789b4bee55131c8a4dc891c0edacc" Oct 02 11:28:27 crc kubenswrapper[4783]: E1002 11:28:27.227390 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9970f40951f02134ce402c664787515cfb1789b4bee55131c8a4dc891c0edacc\": container with ID starting with 9970f40951f02134ce402c664787515cfb1789b4bee55131c8a4dc891c0edacc not found: ID does not exist" containerID="9970f40951f02134ce402c664787515cfb1789b4bee55131c8a4dc891c0edacc" Oct 02 11:28:27 crc kubenswrapper[4783]: I1002 11:28:27.227461 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9970f40951f02134ce402c664787515cfb1789b4bee55131c8a4dc891c0edacc"} err="failed to get container status \"9970f40951f02134ce402c664787515cfb1789b4bee55131c8a4dc891c0edacc\": rpc error: code = NotFound desc = could not find container \"9970f40951f02134ce402c664787515cfb1789b4bee55131c8a4dc891c0edacc\": container with ID starting with 9970f40951f02134ce402c664787515cfb1789b4bee55131c8a4dc891c0edacc not found: ID does not exist" Oct 02 11:28:27 crc kubenswrapper[4783]: I1002 11:28:27.227490 4783 scope.go:117] "RemoveContainer" containerID="7716bede7ea4626535f6ac56d2154b96fa9d9c8d0d579274d8b4f0230d4fccd6" Oct 02 11:28:27 crc kubenswrapper[4783]: E1002 11:28:27.227872 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7716bede7ea4626535f6ac56d2154b96fa9d9c8d0d579274d8b4f0230d4fccd6\": container with ID starting with 7716bede7ea4626535f6ac56d2154b96fa9d9c8d0d579274d8b4f0230d4fccd6 not found: ID does not exist" containerID="7716bede7ea4626535f6ac56d2154b96fa9d9c8d0d579274d8b4f0230d4fccd6" Oct 02 11:28:27 crc kubenswrapper[4783]: I1002 11:28:27.227928 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7716bede7ea4626535f6ac56d2154b96fa9d9c8d0d579274d8b4f0230d4fccd6"} err="failed to get container status \"7716bede7ea4626535f6ac56d2154b96fa9d9c8d0d579274d8b4f0230d4fccd6\": rpc error: code = NotFound desc = could not find container \"7716bede7ea4626535f6ac56d2154b96fa9d9c8d0d579274d8b4f0230d4fccd6\": container with ID starting with 
Oct 02 11:28:27 crc kubenswrapper[4783]: I1002 11:28:27.238834 4783 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/111fb2ae-08ce-4366-aeda-7ec4f5d5432c-catalog-content\") on node \"crc\" DevicePath \"\""
Oct 02 11:28:27 crc kubenswrapper[4783]: I1002 11:28:27.238875 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tb688\" (UniqueName: \"kubernetes.io/projected/111fb2ae-08ce-4366-aeda-7ec4f5d5432c-kube-api-access-tb688\") on node \"crc\" DevicePath \"\""
Oct 02 11:28:27 crc kubenswrapper[4783]: I1002 11:28:27.238889 4783 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/111fb2ae-08ce-4366-aeda-7ec4f5d5432c-utilities\") on node \"crc\" DevicePath \"\""
Oct 02 11:28:27 crc kubenswrapper[4783]: I1002 11:28:27.420512 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-kh9hj"]
Oct 02 11:28:27 crc kubenswrapper[4783]: I1002 11:28:27.428983 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-kh9hj"]
Oct 02 11:28:27 crc kubenswrapper[4783]: I1002 11:28:27.499143 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bmfl5"
Oct 02 11:28:27 crc kubenswrapper[4783]: I1002 11:28:27.542347 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-28hzq\" (UniqueName: \"kubernetes.io/projected/1b27c20b-97ac-477e-99c6-075d9f56c078-kube-api-access-28hzq\") pod \"1b27c20b-97ac-477e-99c6-075d9f56c078\" (UID: \"1b27c20b-97ac-477e-99c6-075d9f56c078\") "
Oct 02 11:28:27 crc kubenswrapper[4783]: I1002 11:28:27.542394 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1b27c20b-97ac-477e-99c6-075d9f56c078-inventory\") pod \"1b27c20b-97ac-477e-99c6-075d9f56c078\" (UID: \"1b27c20b-97ac-477e-99c6-075d9f56c078\") "
Oct 02 11:28:27 crc kubenswrapper[4783]: I1002 11:28:27.542475 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1b27c20b-97ac-477e-99c6-075d9f56c078-ssh-key\") pod \"1b27c20b-97ac-477e-99c6-075d9f56c078\" (UID: \"1b27c20b-97ac-477e-99c6-075d9f56c078\") "
Oct 02 11:28:27 crc kubenswrapper[4783]: I1002 11:28:27.561538 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1b27c20b-97ac-477e-99c6-075d9f56c078-kube-api-access-28hzq" (OuterVolumeSpecName: "kube-api-access-28hzq") pod "1b27c20b-97ac-477e-99c6-075d9f56c078" (UID: "1b27c20b-97ac-477e-99c6-075d9f56c078"). InnerVolumeSpecName "kube-api-access-28hzq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 02 11:28:27 crc kubenswrapper[4783]: I1002 11:28:27.571376 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="111fb2ae-08ce-4366-aeda-7ec4f5d5432c" path="/var/lib/kubelet/pods/111fb2ae-08ce-4366-aeda-7ec4f5d5432c/volumes"
Oct 02 11:28:27 crc kubenswrapper[4783]: I1002 11:28:27.592403 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b27c20b-97ac-477e-99c6-075d9f56c078-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "1b27c20b-97ac-477e-99c6-075d9f56c078" (UID: "1b27c20b-97ac-477e-99c6-075d9f56c078"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:28:27 crc kubenswrapper[4783]: I1002 11:28:27.603227 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1b27c20b-97ac-477e-99c6-075d9f56c078-inventory" (OuterVolumeSpecName: "inventory") pod "1b27c20b-97ac-477e-99c6-075d9f56c078" (UID: "1b27c20b-97ac-477e-99c6-075d9f56c078"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:28:27 crc kubenswrapper[4783]: I1002 11:28:27.644133 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-28hzq\" (UniqueName: \"kubernetes.io/projected/1b27c20b-97ac-477e-99c6-075d9f56c078-kube-api-access-28hzq\") on node \"crc\" DevicePath \"\""
Oct 02 11:28:27 crc kubenswrapper[4783]: I1002 11:28:27.644171 4783 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/1b27c20b-97ac-477e-99c6-075d9f56c078-inventory\") on node \"crc\" DevicePath \"\""
Oct 02 11:28:27 crc kubenswrapper[4783]: I1002 11:28:27.644181 4783 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/1b27c20b-97ac-477e-99c6-075d9f56c078-ssh-key\") on node \"crc\" DevicePath \"\""
Oct 02 11:28:28 crc kubenswrapper[4783]: I1002 11:28:28.097272 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bmfl5" event={"ID":"1b27c20b-97ac-477e-99c6-075d9f56c078","Type":"ContainerDied","Data":"b58ffc3a1129906a43d975ead3126e91843443e1816017abfe890a2434abe08c"}
Oct 02 11:28:28 crc kubenswrapper[4783]: I1002 11:28:28.097312 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b58ffc3a1129906a43d975ead3126e91843443e1816017abfe890a2434abe08c"
Oct 02 11:28:28 crc kubenswrapper[4783]: I1002 11:28:28.097323 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/validate-network-edpm-deployment-openstack-edpm-ipam-bmfl5"
Oct 02 11:28:28 crc kubenswrapper[4783]: I1002 11:28:28.272808 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-7kjqb"]
Oct 02 11:28:28 crc kubenswrapper[4783]: E1002 11:28:28.273179 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="111fb2ae-08ce-4366-aeda-7ec4f5d5432c" containerName="registry-server"
Oct 02 11:28:28 crc kubenswrapper[4783]: I1002 11:28:28.273198 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="111fb2ae-08ce-4366-aeda-7ec4f5d5432c" containerName="registry-server"
Oct 02 11:28:28 crc kubenswrapper[4783]: E1002 11:28:28.273208 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b27c20b-97ac-477e-99c6-075d9f56c078" containerName="validate-network-edpm-deployment-openstack-edpm-ipam"
Oct 02 11:28:28 crc kubenswrapper[4783]: I1002 11:28:28.273216 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b27c20b-97ac-477e-99c6-075d9f56c078" containerName="validate-network-edpm-deployment-openstack-edpm-ipam"
Oct 02 11:28:28 crc kubenswrapper[4783]: E1002 11:28:28.273240 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="111fb2ae-08ce-4366-aeda-7ec4f5d5432c" containerName="extract-content"
Oct 02 11:28:28 crc kubenswrapper[4783]: I1002 11:28:28.273248 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="111fb2ae-08ce-4366-aeda-7ec4f5d5432c" containerName="extract-content"
Oct 02 11:28:28 crc kubenswrapper[4783]: E1002 11:28:28.273271 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="111fb2ae-08ce-4366-aeda-7ec4f5d5432c" containerName="extract-utilities"
Oct 02 11:28:28 crc kubenswrapper[4783]: I1002 11:28:28.273277 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="111fb2ae-08ce-4366-aeda-7ec4f5d5432c" containerName="extract-utilities"
Oct 02 11:28:28 crc kubenswrapper[4783]: I1002 11:28:28.273569 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b27c20b-97ac-477e-99c6-075d9f56c078" containerName="validate-network-edpm-deployment-openstack-edpm-ipam"
Oct 02 11:28:28 crc kubenswrapper[4783]: I1002 11:28:28.273592 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="111fb2ae-08ce-4366-aeda-7ec4f5d5432c" containerName="registry-server"
memory_manager.go:354] "RemoveStaleState removing state" podUID="1b27c20b-97ac-477e-99c6-075d9f56c078" containerName="validate-network-edpm-deployment-openstack-edpm-ipam" Oct 02 11:28:28 crc kubenswrapper[4783]: I1002 11:28:28.273592 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="111fb2ae-08ce-4366-aeda-7ec4f5d5432c" containerName="registry-server" Oct 02 11:28:28 crc kubenswrapper[4783]: I1002 11:28:28.274213 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-7kjqb" Oct 02 11:28:28 crc kubenswrapper[4783]: I1002 11:28:28.276221 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 02 11:28:28 crc kubenswrapper[4783]: I1002 11:28:28.276871 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 02 11:28:28 crc kubenswrapper[4783]: I1002 11:28:28.277137 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-n5lmz" Oct 02 11:28:28 crc kubenswrapper[4783]: I1002 11:28:28.277263 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 02 11:28:28 crc kubenswrapper[4783]: I1002 11:28:28.298323 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-7kjqb"] Oct 02 11:28:28 crc kubenswrapper[4783]: I1002 11:28:28.458284 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ccf4cc85-e2ac-4332-afb4-9dde935527f0-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-7kjqb\" (UID: \"ccf4cc85-e2ac-4332-afb4-9dde935527f0\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-7kjqb" Oct 02 11:28:28 crc kubenswrapper[4783]: I1002 11:28:28.458457 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h5pwd\" (UniqueName: \"kubernetes.io/projected/ccf4cc85-e2ac-4332-afb4-9dde935527f0-kube-api-access-h5pwd\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-7kjqb\" (UID: \"ccf4cc85-e2ac-4332-afb4-9dde935527f0\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-7kjqb" Oct 02 11:28:28 crc kubenswrapper[4783]: I1002 11:28:28.458498 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ccf4cc85-e2ac-4332-afb4-9dde935527f0-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-7kjqb\" (UID: \"ccf4cc85-e2ac-4332-afb4-9dde935527f0\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-7kjqb" Oct 02 11:28:28 crc kubenswrapper[4783]: I1002 11:28:28.560748 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ccf4cc85-e2ac-4332-afb4-9dde935527f0-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-7kjqb\" (UID: \"ccf4cc85-e2ac-4332-afb4-9dde935527f0\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-7kjqb" Oct 02 11:28:28 crc kubenswrapper[4783]: I1002 11:28:28.560805 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h5pwd\" (UniqueName: \"kubernetes.io/projected/ccf4cc85-e2ac-4332-afb4-9dde935527f0-kube-api-access-h5pwd\") pod 
\"install-os-edpm-deployment-openstack-edpm-ipam-7kjqb\" (UID: \"ccf4cc85-e2ac-4332-afb4-9dde935527f0\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-7kjqb" Oct 02 11:28:28 crc kubenswrapper[4783]: I1002 11:28:28.560836 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ccf4cc85-e2ac-4332-afb4-9dde935527f0-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-7kjqb\" (UID: \"ccf4cc85-e2ac-4332-afb4-9dde935527f0\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-7kjqb" Oct 02 11:28:28 crc kubenswrapper[4783]: I1002 11:28:28.567807 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ccf4cc85-e2ac-4332-afb4-9dde935527f0-ssh-key\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-7kjqb\" (UID: \"ccf4cc85-e2ac-4332-afb4-9dde935527f0\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-7kjqb" Oct 02 11:28:28 crc kubenswrapper[4783]: I1002 11:28:28.574099 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ccf4cc85-e2ac-4332-afb4-9dde935527f0-inventory\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-7kjqb\" (UID: \"ccf4cc85-e2ac-4332-afb4-9dde935527f0\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-7kjqb" Oct 02 11:28:28 crc kubenswrapper[4783]: I1002 11:28:28.580812 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h5pwd\" (UniqueName: \"kubernetes.io/projected/ccf4cc85-e2ac-4332-afb4-9dde935527f0-kube-api-access-h5pwd\") pod \"install-os-edpm-deployment-openstack-edpm-ipam-7kjqb\" (UID: \"ccf4cc85-e2ac-4332-afb4-9dde935527f0\") " pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-7kjqb" Oct 02 11:28:28 crc kubenswrapper[4783]: I1002 11:28:28.599182 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-7kjqb" Oct 02 11:28:29 crc kubenswrapper[4783]: I1002 11:28:29.084937 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-os-edpm-deployment-openstack-edpm-ipam-7kjqb"] Oct 02 11:28:29 crc kubenswrapper[4783]: I1002 11:28:29.108995 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-7kjqb" event={"ID":"ccf4cc85-e2ac-4332-afb4-9dde935527f0","Type":"ContainerStarted","Data":"52311edb5c6533fce51184fc8b2548fdc7146eed3337e6e1d952dcccf3741dd5"} Oct 02 11:28:31 crc kubenswrapper[4783]: I1002 11:28:31.124953 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-7kjqb" event={"ID":"ccf4cc85-e2ac-4332-afb4-9dde935527f0","Type":"ContainerStarted","Data":"7ade3d6f0c21083b4c6206fb5edd1bbcf7c5079f304071ecb58184b914762051"} Oct 02 11:28:31 crc kubenswrapper[4783]: I1002 11:28:31.142273 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-7kjqb" podStartSLOduration=1.658378204 podStartE2EDuration="3.142252363s" podCreationTimestamp="2025-10-02 11:28:28 +0000 UTC" firstStartedPulling="2025-10-02 11:28:29.087227066 +0000 UTC m=+2142.403421327" lastFinishedPulling="2025-10-02 11:28:30.571101225 +0000 UTC m=+2143.887295486" observedRunningTime="2025-10-02 11:28:31.138913263 +0000 UTC m=+2144.455107524" watchObservedRunningTime="2025-10-02 11:28:31.142252363 +0000 UTC m=+2144.458446624" Oct 02 11:28:44 crc kubenswrapper[4783]: I1002 11:28:44.138918 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-n5982"] Oct 02 11:28:44 crc kubenswrapper[4783]: I1002 11:28:44.144158 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-n5982" Oct 02 11:28:44 crc kubenswrapper[4783]: I1002 11:28:44.147982 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-n5982"] Oct 02 11:28:44 crc kubenswrapper[4783]: I1002 11:28:44.255051 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kgz2v\" (UniqueName: \"kubernetes.io/projected/953fb357-7583-465b-ad75-ea86cc5a60a4-kube-api-access-kgz2v\") pod \"redhat-marketplace-n5982\" (UID: \"953fb357-7583-465b-ad75-ea86cc5a60a4\") " pod="openshift-marketplace/redhat-marketplace-n5982" Oct 02 11:28:44 crc kubenswrapper[4783]: I1002 11:28:44.255122 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/953fb357-7583-465b-ad75-ea86cc5a60a4-catalog-content\") pod \"redhat-marketplace-n5982\" (UID: \"953fb357-7583-465b-ad75-ea86cc5a60a4\") " pod="openshift-marketplace/redhat-marketplace-n5982" Oct 02 11:28:44 crc kubenswrapper[4783]: I1002 11:28:44.255161 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/953fb357-7583-465b-ad75-ea86cc5a60a4-utilities\") pod \"redhat-marketplace-n5982\" (UID: \"953fb357-7583-465b-ad75-ea86cc5a60a4\") " pod="openshift-marketplace/redhat-marketplace-n5982" Oct 02 11:28:44 crc kubenswrapper[4783]: I1002 11:28:44.357228 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kgz2v\" (UniqueName: \"kubernetes.io/projected/953fb357-7583-465b-ad75-ea86cc5a60a4-kube-api-access-kgz2v\") pod \"redhat-marketplace-n5982\" (UID: \"953fb357-7583-465b-ad75-ea86cc5a60a4\") " pod="openshift-marketplace/redhat-marketplace-n5982" Oct 02 11:28:44 crc kubenswrapper[4783]: I1002 11:28:44.357290 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/953fb357-7583-465b-ad75-ea86cc5a60a4-catalog-content\") pod \"redhat-marketplace-n5982\" (UID: \"953fb357-7583-465b-ad75-ea86cc5a60a4\") " pod="openshift-marketplace/redhat-marketplace-n5982" Oct 02 11:28:44 crc kubenswrapper[4783]: I1002 11:28:44.357324 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/953fb357-7583-465b-ad75-ea86cc5a60a4-utilities\") pod \"redhat-marketplace-n5982\" (UID: \"953fb357-7583-465b-ad75-ea86cc5a60a4\") " pod="openshift-marketplace/redhat-marketplace-n5982" Oct 02 11:28:44 crc kubenswrapper[4783]: I1002 11:28:44.357951 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/953fb357-7583-465b-ad75-ea86cc5a60a4-utilities\") pod \"redhat-marketplace-n5982\" (UID: \"953fb357-7583-465b-ad75-ea86cc5a60a4\") " pod="openshift-marketplace/redhat-marketplace-n5982" Oct 02 11:28:44 crc kubenswrapper[4783]: I1002 11:28:44.358009 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/953fb357-7583-465b-ad75-ea86cc5a60a4-catalog-content\") pod \"redhat-marketplace-n5982\" (UID: \"953fb357-7583-465b-ad75-ea86cc5a60a4\") " pod="openshift-marketplace/redhat-marketplace-n5982" Oct 02 11:28:44 crc kubenswrapper[4783]: I1002 11:28:44.390448 4783 operation_generator.go:637] "MountVolume.SetUp 
Oct 02 11:28:44 crc kubenswrapper[4783]: I1002 11:28:44.469060 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-n5982"
Oct 02 11:28:44 crc kubenswrapper[4783]: I1002 11:28:44.738758 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-hgk9l"]
Oct 02 11:28:44 crc kubenswrapper[4783]: I1002 11:28:44.744302 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hgk9l"
Oct 02 11:28:44 crc kubenswrapper[4783]: I1002 11:28:44.775027 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-hgk9l"]
Oct 02 11:28:44 crc kubenswrapper[4783]: I1002 11:28:44.869454 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a537075-67c8-4620-a04b-ce3bff07d4c9-catalog-content\") pod \"redhat-operators-hgk9l\" (UID: \"4a537075-67c8-4620-a04b-ce3bff07d4c9\") " pod="openshift-marketplace/redhat-operators-hgk9l"
Oct 02 11:28:44 crc kubenswrapper[4783]: I1002 11:28:44.869546 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a537075-67c8-4620-a04b-ce3bff07d4c9-utilities\") pod \"redhat-operators-hgk9l\" (UID: \"4a537075-67c8-4620-a04b-ce3bff07d4c9\") " pod="openshift-marketplace/redhat-operators-hgk9l"
Oct 02 11:28:44 crc kubenswrapper[4783]: I1002 11:28:44.869608 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n49zh\" (UniqueName: \"kubernetes.io/projected/4a537075-67c8-4620-a04b-ce3bff07d4c9-kube-api-access-n49zh\") pod \"redhat-operators-hgk9l\" (UID: \"4a537075-67c8-4620-a04b-ce3bff07d4c9\") " pod="openshift-marketplace/redhat-operators-hgk9l"
Oct 02 11:28:44 crc kubenswrapper[4783]: I1002 11:28:44.971513 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a537075-67c8-4620-a04b-ce3bff07d4c9-catalog-content\") pod \"redhat-operators-hgk9l\" (UID: \"4a537075-67c8-4620-a04b-ce3bff07d4c9\") " pod="openshift-marketplace/redhat-operators-hgk9l"
Oct 02 11:28:44 crc kubenswrapper[4783]: I1002 11:28:44.971644 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a537075-67c8-4620-a04b-ce3bff07d4c9-utilities\") pod \"redhat-operators-hgk9l\" (UID: \"4a537075-67c8-4620-a04b-ce3bff07d4c9\") " pod="openshift-marketplace/redhat-operators-hgk9l"
Oct 02 11:28:44 crc kubenswrapper[4783]: I1002 11:28:44.971699 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n49zh\" (UniqueName: \"kubernetes.io/projected/4a537075-67c8-4620-a04b-ce3bff07d4c9-kube-api-access-n49zh\") pod \"redhat-operators-hgk9l\" (UID: \"4a537075-67c8-4620-a04b-ce3bff07d4c9\") " pod="openshift-marketplace/redhat-operators-hgk9l"
Oct 02 11:28:44 crc kubenswrapper[4783]: I1002 11:28:44.972221 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a537075-67c8-4620-a04b-ce3bff07d4c9-utilities\") pod \"redhat-operators-hgk9l\" (UID: \"4a537075-67c8-4620-a04b-ce3bff07d4c9\") " pod="openshift-marketplace/redhat-operators-hgk9l"
Oct 02 11:28:44 crc kubenswrapper[4783]: I1002 11:28:44.972228 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a537075-67c8-4620-a04b-ce3bff07d4c9-catalog-content\") pod \"redhat-operators-hgk9l\" (UID: \"4a537075-67c8-4620-a04b-ce3bff07d4c9\") " pod="openshift-marketplace/redhat-operators-hgk9l"
Oct 02 11:28:44 crc kubenswrapper[4783]: I1002 11:28:44.991569 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-n5982"]
Oct 02 11:28:45 crc kubenswrapper[4783]: I1002 11:28:45.016799 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n49zh\" (UniqueName: \"kubernetes.io/projected/4a537075-67c8-4620-a04b-ce3bff07d4c9-kube-api-access-n49zh\") pod \"redhat-operators-hgk9l\" (UID: \"4a537075-67c8-4620-a04b-ce3bff07d4c9\") " pod="openshift-marketplace/redhat-operators-hgk9l"
Oct 02 11:28:45 crc kubenswrapper[4783]: I1002 11:28:45.068252 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hgk9l"
Oct 02 11:28:45 crc kubenswrapper[4783]: I1002 11:28:45.248419 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n5982" event={"ID":"953fb357-7583-465b-ad75-ea86cc5a60a4","Type":"ContainerStarted","Data":"145f9e0f3caca15cc0c16eee92289e9143a9d182e2beea16051805f0b29239e6"}
Oct 02 11:28:45 crc kubenswrapper[4783]: I1002 11:28:45.582253 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-hgk9l"]
Oct 02 11:28:46 crc kubenswrapper[4783]: I1002 11:28:46.260490 4783 generic.go:334] "Generic (PLEG): container finished" podID="4a537075-67c8-4620-a04b-ce3bff07d4c9" containerID="3d13babbc255adbfc16d131af2846fa4a3b11726699a1cfc625adc6f03be59a0" exitCode=0
Oct 02 11:28:46 crc kubenswrapper[4783]: I1002 11:28:46.260555 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hgk9l" event={"ID":"4a537075-67c8-4620-a04b-ce3bff07d4c9","Type":"ContainerDied","Data":"3d13babbc255adbfc16d131af2846fa4a3b11726699a1cfc625adc6f03be59a0"}
Oct 02 11:28:46 crc kubenswrapper[4783]: I1002 11:28:46.260579 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hgk9l" event={"ID":"4a537075-67c8-4620-a04b-ce3bff07d4c9","Type":"ContainerStarted","Data":"9268e768295510119a6e850dde3eb87fdb61881b33b5a2677edd66be80e7a3c2"}
Oct 02 11:28:46 crc kubenswrapper[4783]: I1002 11:28:46.262710 4783 generic.go:334] "Generic (PLEG): container finished" podID="953fb357-7583-465b-ad75-ea86cc5a60a4" containerID="0033c823ea69f656481644a07e4b7a69b1332e633007c90e0e660bcb0d39ddb6" exitCode=0
Oct 02 11:28:46 crc kubenswrapper[4783]: I1002 11:28:46.262729 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n5982" event={"ID":"953fb357-7583-465b-ad75-ea86cc5a60a4","Type":"ContainerDied","Data":"0033c823ea69f656481644a07e4b7a69b1332e633007c90e0e660bcb0d39ddb6"}
Oct 02 11:28:46 crc kubenswrapper[4783]: I1002 11:28:46.533475 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-znf25"]
Oct 02 11:28:46 crc kubenswrapper[4783]: I1002 11:28:46.536062 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-znf25"
Oct 02 11:28:46 crc kubenswrapper[4783]: I1002 11:28:46.552484 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-znf25"]
Oct 02 11:28:46 crc kubenswrapper[4783]: I1002 11:28:46.704922 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d8zm8\" (UniqueName: \"kubernetes.io/projected/e5d9a0b6-64c2-4a13-8f58-c671f7818ab3-kube-api-access-d8zm8\") pod \"community-operators-znf25\" (UID: \"e5d9a0b6-64c2-4a13-8f58-c671f7818ab3\") " pod="openshift-marketplace/community-operators-znf25"
Oct 02 11:28:46 crc kubenswrapper[4783]: I1002 11:28:46.705948 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5d9a0b6-64c2-4a13-8f58-c671f7818ab3-catalog-content\") pod \"community-operators-znf25\" (UID: \"e5d9a0b6-64c2-4a13-8f58-c671f7818ab3\") " pod="openshift-marketplace/community-operators-znf25"
Oct 02 11:28:46 crc kubenswrapper[4783]: I1002 11:28:46.706425 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5d9a0b6-64c2-4a13-8f58-c671f7818ab3-utilities\") pod \"community-operators-znf25\" (UID: \"e5d9a0b6-64c2-4a13-8f58-c671f7818ab3\") " pod="openshift-marketplace/community-operators-znf25"
Oct 02 11:28:46 crc kubenswrapper[4783]: I1002 11:28:46.809468 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d8zm8\" (UniqueName: \"kubernetes.io/projected/e5d9a0b6-64c2-4a13-8f58-c671f7818ab3-kube-api-access-d8zm8\") pod \"community-operators-znf25\" (UID: \"e5d9a0b6-64c2-4a13-8f58-c671f7818ab3\") " pod="openshift-marketplace/community-operators-znf25"
Oct 02 11:28:46 crc kubenswrapper[4783]: I1002 11:28:46.809616 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5d9a0b6-64c2-4a13-8f58-c671f7818ab3-catalog-content\") pod \"community-operators-znf25\" (UID: \"e5d9a0b6-64c2-4a13-8f58-c671f7818ab3\") " pod="openshift-marketplace/community-operators-znf25"
Oct 02 11:28:46 crc kubenswrapper[4783]: I1002 11:28:46.809640 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5d9a0b6-64c2-4a13-8f58-c671f7818ab3-utilities\") pod \"community-operators-znf25\" (UID: \"e5d9a0b6-64c2-4a13-8f58-c671f7818ab3\") " pod="openshift-marketplace/community-operators-znf25"
Oct 02 11:28:46 crc kubenswrapper[4783]: I1002 11:28:46.810217 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5d9a0b6-64c2-4a13-8f58-c671f7818ab3-utilities\") pod \"community-operators-znf25\" (UID: \"e5d9a0b6-64c2-4a13-8f58-c671f7818ab3\") " pod="openshift-marketplace/community-operators-znf25"
Oct 02 11:28:46 crc kubenswrapper[4783]: I1002 11:28:46.810218 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5d9a0b6-64c2-4a13-8f58-c671f7818ab3-catalog-content\") pod \"community-operators-znf25\" (UID: \"e5d9a0b6-64c2-4a13-8f58-c671f7818ab3\") " pod="openshift-marketplace/community-operators-znf25"
Oct 02 11:28:46 crc kubenswrapper[4783]: I1002 11:28:46.840690 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d8zm8\" (UniqueName: \"kubernetes.io/projected/e5d9a0b6-64c2-4a13-8f58-c671f7818ab3-kube-api-access-d8zm8\") pod \"community-operators-znf25\" (UID: \"e5d9a0b6-64c2-4a13-8f58-c671f7818ab3\") " pod="openshift-marketplace/community-operators-znf25"
Oct 02 11:28:46 crc kubenswrapper[4783]: I1002 11:28:46.861399 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-znf25"
Oct 02 11:28:47 crc kubenswrapper[4783]: I1002 11:28:47.510523 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-znf25"]
Oct 02 11:28:48 crc kubenswrapper[4783]: I1002 11:28:48.284667 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-znf25" event={"ID":"e5d9a0b6-64c2-4a13-8f58-c671f7818ab3","Type":"ContainerStarted","Data":"654175d6657daf528c4d54ceabfbfebbfa5c8e30f2ffe5c02a85a6118b6073e9"}
Oct 02 11:28:53 crc kubenswrapper[4783]: I1002 11:28:53.337155 4783 generic.go:334] "Generic (PLEG): container finished" podID="e5d9a0b6-64c2-4a13-8f58-c671f7818ab3" containerID="8294607b3d05c13ef5d2193d7475a1d3a2210a05ff0da99b6e412f5d2ea1dee2" exitCode=0
Oct 02 11:28:53 crc kubenswrapper[4783]: I1002 11:28:53.337690 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-znf25" event={"ID":"e5d9a0b6-64c2-4a13-8f58-c671f7818ab3","Type":"ContainerDied","Data":"8294607b3d05c13ef5d2193d7475a1d3a2210a05ff0da99b6e412f5d2ea1dee2"}
Oct 02 11:28:53 crc kubenswrapper[4783]: I1002 11:28:53.348530 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n5982" event={"ID":"953fb357-7583-465b-ad75-ea86cc5a60a4","Type":"ContainerStarted","Data":"1a99c78f66832933a9da368cabde6230f5b22e6b7a8c44d62880cfd379850606"}
Oct 02 11:28:53 crc kubenswrapper[4783]: I1002 11:28:53.354013 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hgk9l" event={"ID":"4a537075-67c8-4620-a04b-ce3bff07d4c9","Type":"ContainerStarted","Data":"2153a0f1fe12989eaab0945e4fec9d75cb692a0a506cd6d4331f4dd352bd3ec1"}
Oct 02 11:28:54 crc kubenswrapper[4783]: I1002 11:28:54.367680 4783 generic.go:334] "Generic (PLEG): container finished" podID="953fb357-7583-465b-ad75-ea86cc5a60a4" containerID="1a99c78f66832933a9da368cabde6230f5b22e6b7a8c44d62880cfd379850606" exitCode=0
Oct 02 11:28:54 crc kubenswrapper[4783]: I1002 11:28:54.368610 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n5982" event={"ID":"953fb357-7583-465b-ad75-ea86cc5a60a4","Type":"ContainerDied","Data":"1a99c78f66832933a9da368cabde6230f5b22e6b7a8c44d62880cfd379850606"}
Oct 02 11:28:55 crc kubenswrapper[4783]: I1002 11:28:55.627170 4783 scope.go:117] "RemoveContainer" containerID="e0599da871f31fffa19e1bfd33063e9b28ad45179bccfa39bc3d0d2474104dcb"
Oct 02 11:28:55 crc kubenswrapper[4783]: I1002 11:28:55.768796 4783 scope.go:117] "RemoveContainer" containerID="55e37a7c51cd44639abf5ca8788fa6eb1a731d04d07d1467c686f65391221c4d"
Oct 02 11:28:59 crc kubenswrapper[4783]: I1002 11:28:59.042228 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-mgxlb"]
Oct 02 11:28:59 crc kubenswrapper[4783]: I1002 11:28:59.061973 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-mgxlb"]
Oct 02 11:28:59 crc kubenswrapper[4783]: I1002 11:28:59.557200 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="82576b63-a496-439f-a656-f89a0dc00ab9" path="/var/lib/kubelet/pods/82576b63-a496-439f-a656-f89a0dc00ab9/volumes"
Oct 02 11:29:02 crc kubenswrapper[4783]: I1002 11:29:02.451008 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-znf25" event={"ID":"e5d9a0b6-64c2-4a13-8f58-c671f7818ab3","Type":"ContainerStarted","Data":"64697afe1d91892df953d71290c8d224b0d0e248829c00291e5e20ae043cd1a9"}
Oct 02 11:29:02 crc kubenswrapper[4783]: I1002 11:29:02.457443 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n5982" event={"ID":"953fb357-7583-465b-ad75-ea86cc5a60a4","Type":"ContainerStarted","Data":"760f26ed149b837206e8eb6092fa53b1ab7992101742ac6551177108c734dde0"}
Oct 02 11:29:02 crc kubenswrapper[4783]: I1002 11:29:02.508558 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-n5982" podStartSLOduration=3.434944748 podStartE2EDuration="18.508535889s" podCreationTimestamp="2025-10-02 11:28:44 +0000 UTC" firstStartedPulling="2025-10-02 11:28:46.26374528 +0000 UTC m=+2159.579939541" lastFinishedPulling="2025-10-02 11:29:01.337336421 +0000 UTC m=+2174.653530682" observedRunningTime="2025-10-02 11:29:02.498626711 +0000 UTC m=+2175.814820982" watchObservedRunningTime="2025-10-02 11:29:02.508535889 +0000 UTC m=+2175.824730150"
Oct 02 11:29:04 crc kubenswrapper[4783]: I1002 11:29:04.469145 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-n5982"
Oct 02 11:29:04 crc kubenswrapper[4783]: I1002 11:29:04.469589 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-n5982"
Oct 02 11:29:05 crc kubenswrapper[4783]: I1002 11:29:05.532294 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-n5982" podUID="953fb357-7583-465b-ad75-ea86cc5a60a4" containerName="registry-server" probeResult="failure" output=<
Oct 02 11:29:05 crc kubenswrapper[4783]: timeout: failed to connect service ":50051" within 1s
Oct 02 11:29:05 crc kubenswrapper[4783]: >
Oct 02 11:29:14 crc kubenswrapper[4783]: I1002 11:29:14.575206 4783 generic.go:334] "Generic (PLEG): container finished" podID="4a537075-67c8-4620-a04b-ce3bff07d4c9" containerID="2153a0f1fe12989eaab0945e4fec9d75cb692a0a506cd6d4331f4dd352bd3ec1" exitCode=0
Oct 02 11:29:14 crc kubenswrapper[4783]: I1002 11:29:14.575382 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hgk9l" event={"ID":"4a537075-67c8-4620-a04b-ce3bff07d4c9","Type":"ContainerDied","Data":"2153a0f1fe12989eaab0945e4fec9d75cb692a0a506cd6d4331f4dd352bd3ec1"}
Oct 02 11:29:14 crc kubenswrapper[4783]: I1002 11:29:14.592180 4783 generic.go:334] "Generic (PLEG): container finished" podID="e5d9a0b6-64c2-4a13-8f58-c671f7818ab3" containerID="64697afe1d91892df953d71290c8d224b0d0e248829c00291e5e20ae043cd1a9" exitCode=0
Oct 02 11:29:14 crc kubenswrapper[4783]: I1002 11:29:14.592237 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-znf25" event={"ID":"e5d9a0b6-64c2-4a13-8f58-c671f7818ab3","Type":"ContainerDied","Data":"64697afe1d91892df953d71290c8d224b0d0e248829c00291e5e20ae043cd1a9"}
Oct 02 11:29:14 crc kubenswrapper[4783]: I1002 11:29:14.599916 4783 generic.go:334] "Generic (PLEG): container finished" podID="ccf4cc85-e2ac-4332-afb4-9dde935527f0" containerID="7ade3d6f0c21083b4c6206fb5edd1bbcf7c5079f304071ecb58184b914762051" exitCode=0
Oct 02 11:29:14 crc kubenswrapper[4783]: I1002 11:29:14.599961 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-7kjqb" event={"ID":"ccf4cc85-e2ac-4332-afb4-9dde935527f0","Type":"ContainerDied","Data":"7ade3d6f0c21083b4c6206fb5edd1bbcf7c5079f304071ecb58184b914762051"}
Oct 02 11:29:15 crc kubenswrapper[4783]: I1002 11:29:15.523676 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-n5982" podUID="953fb357-7583-465b-ad75-ea86cc5a60a4" containerName="registry-server" probeResult="failure" output=<
Oct 02 11:29:15 crc kubenswrapper[4783]: timeout: failed to connect service ":50051" within 1s
Oct 02 11:29:15 crc kubenswrapper[4783]: >
Oct 02 11:29:16 crc kubenswrapper[4783]: I1002 11:29:16.029130 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-7kjqb"
Oct 02 11:29:16 crc kubenswrapper[4783]: I1002 11:29:16.209583 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ccf4cc85-e2ac-4332-afb4-9dde935527f0-ssh-key\") pod \"ccf4cc85-e2ac-4332-afb4-9dde935527f0\" (UID: \"ccf4cc85-e2ac-4332-afb4-9dde935527f0\") "
Oct 02 11:29:16 crc kubenswrapper[4783]: I1002 11:29:16.209672 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h5pwd\" (UniqueName: \"kubernetes.io/projected/ccf4cc85-e2ac-4332-afb4-9dde935527f0-kube-api-access-h5pwd\") pod \"ccf4cc85-e2ac-4332-afb4-9dde935527f0\" (UID: \"ccf4cc85-e2ac-4332-afb4-9dde935527f0\") "
Oct 02 11:29:16 crc kubenswrapper[4783]: I1002 11:29:16.209768 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ccf4cc85-e2ac-4332-afb4-9dde935527f0-inventory\") pod \"ccf4cc85-e2ac-4332-afb4-9dde935527f0\" (UID: \"ccf4cc85-e2ac-4332-afb4-9dde935527f0\") "
Oct 02 11:29:16 crc kubenswrapper[4783]: I1002 11:29:16.218389 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ccf4cc85-e2ac-4332-afb4-9dde935527f0-kube-api-access-h5pwd" (OuterVolumeSpecName: "kube-api-access-h5pwd") pod "ccf4cc85-e2ac-4332-afb4-9dde935527f0" (UID: "ccf4cc85-e2ac-4332-afb4-9dde935527f0"). InnerVolumeSpecName "kube-api-access-h5pwd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 02 11:29:16 crc kubenswrapper[4783]: I1002 11:29:16.247517 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ccf4cc85-e2ac-4332-afb4-9dde935527f0-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "ccf4cc85-e2ac-4332-afb4-9dde935527f0" (UID: "ccf4cc85-e2ac-4332-afb4-9dde935527f0"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:29:16 crc kubenswrapper[4783]: I1002 11:29:16.251320 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ccf4cc85-e2ac-4332-afb4-9dde935527f0-inventory" (OuterVolumeSpecName: "inventory") pod "ccf4cc85-e2ac-4332-afb4-9dde935527f0" (UID: "ccf4cc85-e2ac-4332-afb4-9dde935527f0"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:29:16 crc kubenswrapper[4783]: I1002 11:29:16.312076 4783 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ccf4cc85-e2ac-4332-afb4-9dde935527f0-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 02 11:29:16 crc kubenswrapper[4783]: I1002 11:29:16.312109 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h5pwd\" (UniqueName: \"kubernetes.io/projected/ccf4cc85-e2ac-4332-afb4-9dde935527f0-kube-api-access-h5pwd\") on node \"crc\" DevicePath \"\"" Oct 02 11:29:16 crc kubenswrapper[4783]: I1002 11:29:16.312123 4783 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ccf4cc85-e2ac-4332-afb4-9dde935527f0-inventory\") on node \"crc\" DevicePath \"\"" Oct 02 11:29:16 crc kubenswrapper[4783]: I1002 11:29:16.616940 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-7kjqb" event={"ID":"ccf4cc85-e2ac-4332-afb4-9dde935527f0","Type":"ContainerDied","Data":"52311edb5c6533fce51184fc8b2548fdc7146eed3337e6e1d952dcccf3741dd5"} Oct 02 11:29:16 crc kubenswrapper[4783]: I1002 11:29:16.616980 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="52311edb5c6533fce51184fc8b2548fdc7146eed3337e6e1d952dcccf3741dd5" Oct 02 11:29:16 crc kubenswrapper[4783]: I1002 11:29:16.616945 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/install-os-edpm-deployment-openstack-edpm-ipam-7kjqb" Oct 02 11:29:16 crc kubenswrapper[4783]: I1002 11:29:16.619911 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hgk9l" event={"ID":"4a537075-67c8-4620-a04b-ce3bff07d4c9","Type":"ContainerStarted","Data":"c1249dcf7804412da3509548da1a3af27f440ee1aee27e002f642621c50c5a63"} Oct 02 11:29:16 crc kubenswrapper[4783]: I1002 11:29:16.647628 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-hgk9l" podStartSLOduration=2.911461337 podStartE2EDuration="32.647609672s" podCreationTimestamp="2025-10-02 11:28:44 +0000 UTC" firstStartedPulling="2025-10-02 11:28:46.262099635 +0000 UTC m=+2159.578293916" lastFinishedPulling="2025-10-02 11:29:15.99824799 +0000 UTC m=+2189.314442251" observedRunningTime="2025-10-02 11:29:16.640817618 +0000 UTC m=+2189.957011879" watchObservedRunningTime="2025-10-02 11:29:16.647609672 +0000 UTC m=+2189.963803933" Oct 02 11:29:16 crc kubenswrapper[4783]: I1002 11:29:16.717917 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-rwwwd"] Oct 02 11:29:16 crc kubenswrapper[4783]: E1002 11:29:16.718394 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ccf4cc85-e2ac-4332-afb4-9dde935527f0" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Oct 02 11:29:16 crc kubenswrapper[4783]: I1002 11:29:16.718434 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="ccf4cc85-e2ac-4332-afb4-9dde935527f0" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Oct 02 11:29:16 crc kubenswrapper[4783]: I1002 11:29:16.718670 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="ccf4cc85-e2ac-4332-afb4-9dde935527f0" containerName="install-os-edpm-deployment-openstack-edpm-ipam" Oct 02 11:29:16 crc kubenswrapper[4783]: I1002 11:29:16.719470 4783 util.go:30] "No sandbox for pod can be 
found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-rwwwd" Oct 02 11:29:16 crc kubenswrapper[4783]: I1002 11:29:16.723556 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 02 11:29:16 crc kubenswrapper[4783]: I1002 11:29:16.723922 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-n5lmz" Oct 02 11:29:16 crc kubenswrapper[4783]: I1002 11:29:16.725743 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 02 11:29:16 crc kubenswrapper[4783]: I1002 11:29:16.725871 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 02 11:29:16 crc kubenswrapper[4783]: I1002 11:29:16.742345 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-rwwwd"] Oct 02 11:29:16 crc kubenswrapper[4783]: I1002 11:29:16.821316 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ch65w\" (UniqueName: \"kubernetes.io/projected/29cd1f95-5f6b-451a-81a4-d78e56e04c43-kube-api-access-ch65w\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-rwwwd\" (UID: \"29cd1f95-5f6b-451a-81a4-d78e56e04c43\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-rwwwd" Oct 02 11:29:16 crc kubenswrapper[4783]: I1002 11:29:16.821453 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/29cd1f95-5f6b-451a-81a4-d78e56e04c43-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-rwwwd\" (UID: \"29cd1f95-5f6b-451a-81a4-d78e56e04c43\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-rwwwd" Oct 02 11:29:16 crc kubenswrapper[4783]: I1002 11:29:16.821549 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/29cd1f95-5f6b-451a-81a4-d78e56e04c43-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-rwwwd\" (UID: \"29cd1f95-5f6b-451a-81a4-d78e56e04c43\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-rwwwd" Oct 02 11:29:16 crc kubenswrapper[4783]: I1002 11:29:16.923495 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/29cd1f95-5f6b-451a-81a4-d78e56e04c43-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-rwwwd\" (UID: \"29cd1f95-5f6b-451a-81a4-d78e56e04c43\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-rwwwd" Oct 02 11:29:16 crc kubenswrapper[4783]: I1002 11:29:16.923643 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/29cd1f95-5f6b-451a-81a4-d78e56e04c43-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-rwwwd\" (UID: \"29cd1f95-5f6b-451a-81a4-d78e56e04c43\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-rwwwd" Oct 02 11:29:16 crc kubenswrapper[4783]: I1002 11:29:16.923758 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ch65w\" (UniqueName: \"kubernetes.io/projected/29cd1f95-5f6b-451a-81a4-d78e56e04c43-kube-api-access-ch65w\") pod 
\"configure-os-edpm-deployment-openstack-edpm-ipam-rwwwd\" (UID: \"29cd1f95-5f6b-451a-81a4-d78e56e04c43\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-rwwwd" Oct 02 11:29:16 crc kubenswrapper[4783]: I1002 11:29:16.931186 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/29cd1f95-5f6b-451a-81a4-d78e56e04c43-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-rwwwd\" (UID: \"29cd1f95-5f6b-451a-81a4-d78e56e04c43\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-rwwwd" Oct 02 11:29:16 crc kubenswrapper[4783]: I1002 11:29:16.931770 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/29cd1f95-5f6b-451a-81a4-d78e56e04c43-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-rwwwd\" (UID: \"29cd1f95-5f6b-451a-81a4-d78e56e04c43\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-rwwwd" Oct 02 11:29:16 crc kubenswrapper[4783]: I1002 11:29:16.942368 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ch65w\" (UniqueName: \"kubernetes.io/projected/29cd1f95-5f6b-451a-81a4-d78e56e04c43-kube-api-access-ch65w\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-rwwwd\" (UID: \"29cd1f95-5f6b-451a-81a4-d78e56e04c43\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-rwwwd" Oct 02 11:29:17 crc kubenswrapper[4783]: I1002 11:29:17.042255 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-rwwwd" Oct 02 11:29:17 crc kubenswrapper[4783]: I1002 11:29:17.591758 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-rwwwd"] Oct 02 11:29:17 crc kubenswrapper[4783]: W1002 11:29:17.596130 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod29cd1f95_5f6b_451a_81a4_d78e56e04c43.slice/crio-ed97454cfa896a22fc66a518724327382030ce50f9d259a68a6b3ee18aad9363 WatchSource:0}: Error finding container ed97454cfa896a22fc66a518724327382030ce50f9d259a68a6b3ee18aad9363: Status 404 returned error can't find the container with id ed97454cfa896a22fc66a518724327382030ce50f9d259a68a6b3ee18aad9363 Oct 02 11:29:17 crc kubenswrapper[4783]: I1002 11:29:17.636497 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-znf25" event={"ID":"e5d9a0b6-64c2-4a13-8f58-c671f7818ab3","Type":"ContainerStarted","Data":"18c09ee401a38a828ceb21160784e6a34a49be5eabc33c8e0b3ad610e72f71f5"} Oct 02 11:29:17 crc kubenswrapper[4783]: I1002 11:29:17.639590 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-rwwwd" event={"ID":"29cd1f95-5f6b-451a-81a4-d78e56e04c43","Type":"ContainerStarted","Data":"ed97454cfa896a22fc66a518724327382030ce50f9d259a68a6b3ee18aad9363"} Oct 02 11:29:17 crc kubenswrapper[4783]: I1002 11:29:17.663786 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-znf25" podStartSLOduration=8.580720996 podStartE2EDuration="31.663768259s" podCreationTimestamp="2025-10-02 11:28:46 +0000 UTC" firstStartedPulling="2025-10-02 11:28:53.340110032 +0000 UTC m=+2166.656304293" lastFinishedPulling="2025-10-02 11:29:16.423157295 +0000 UTC m=+2189.739351556" 
observedRunningTime="2025-10-02 11:29:17.656916533 +0000 UTC m=+2190.973110794" watchObservedRunningTime="2025-10-02 11:29:17.663768259 +0000 UTC m=+2190.979962520" Oct 02 11:29:21 crc kubenswrapper[4783]: I1002 11:29:21.513861 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 02 11:29:21 crc kubenswrapper[4783]: I1002 11:29:21.514279 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 02 11:29:21 crc kubenswrapper[4783]: I1002 11:29:21.670901 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-rwwwd" event={"ID":"29cd1f95-5f6b-451a-81a4-d78e56e04c43","Type":"ContainerStarted","Data":"c14993878e51a2eb7cce083d1877c4ebd79da571fc4ee55cb87a58a1720b6620"} Oct 02 11:29:21 crc kubenswrapper[4783]: I1002 11:29:21.698667 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-rwwwd" podStartSLOduration=2.437430371 podStartE2EDuration="5.69864808s" podCreationTimestamp="2025-10-02 11:29:16 +0000 UTC" firstStartedPulling="2025-10-02 11:29:17.598992698 +0000 UTC m=+2190.915186959" lastFinishedPulling="2025-10-02 11:29:20.860210407 +0000 UTC m=+2194.176404668" observedRunningTime="2025-10-02 11:29:21.697060457 +0000 UTC m=+2195.013254748" watchObservedRunningTime="2025-10-02 11:29:21.69864808 +0000 UTC m=+2195.014842341" Oct 02 11:29:25 crc kubenswrapper[4783]: I1002 11:29:25.068672 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-hgk9l" Oct 02 11:29:25 crc kubenswrapper[4783]: I1002 11:29:25.069315 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-hgk9l" Oct 02 11:29:25 crc kubenswrapper[4783]: I1002 11:29:25.517911 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-n5982" podUID="953fb357-7583-465b-ad75-ea86cc5a60a4" containerName="registry-server" probeResult="failure" output=< Oct 02 11:29:25 crc kubenswrapper[4783]: timeout: failed to connect service ":50051" within 1s Oct 02 11:29:25 crc kubenswrapper[4783]: > Oct 02 11:29:26 crc kubenswrapper[4783]: I1002 11:29:26.117814 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-hgk9l" podUID="4a537075-67c8-4620-a04b-ce3bff07d4c9" containerName="registry-server" probeResult="failure" output=< Oct 02 11:29:26 crc kubenswrapper[4783]: timeout: failed to connect service ":50051" within 1s Oct 02 11:29:26 crc kubenswrapper[4783]: > Oct 02 11:29:26 crc kubenswrapper[4783]: I1002 11:29:26.862162 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-znf25" Oct 02 11:29:26 crc kubenswrapper[4783]: I1002 11:29:26.862574 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-znf25" Oct 02 11:29:27 crc kubenswrapper[4783]: I1002 11:29:27.905979 
Oct 02 11:29:27 crc kubenswrapper[4783]: timeout: failed to connect service ":50051" within 1s
Oct 02 11:29:27 crc kubenswrapper[4783]: >
Oct 02 11:29:34 crc kubenswrapper[4783]: I1002 11:29:34.517451 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-n5982"
Oct 02 11:29:34 crc kubenswrapper[4783]: I1002 11:29:34.567893 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-n5982"
Oct 02 11:29:34 crc kubenswrapper[4783]: I1002 11:29:34.754125 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-n5982"]
Oct 02 11:29:35 crc kubenswrapper[4783]: I1002 11:29:35.796790 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-n5982" podUID="953fb357-7583-465b-ad75-ea86cc5a60a4" containerName="registry-server" containerID="cri-o://760f26ed149b837206e8eb6092fa53b1ab7992101742ac6551177108c734dde0" gracePeriod=2
Oct 02 11:29:36 crc kubenswrapper[4783]: I1002 11:29:36.110053 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-hgk9l" podUID="4a537075-67c8-4620-a04b-ce3bff07d4c9" containerName="registry-server" probeResult="failure" output=<
Oct 02 11:29:36 crc kubenswrapper[4783]: timeout: failed to connect service ":50051" within 1s
Oct 02 11:29:36 crc kubenswrapper[4783]: >
Oct 02 11:29:36 crc kubenswrapper[4783]: I1002 11:29:36.492256 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-n5982"
Oct 02 11:29:36 crc kubenswrapper[4783]: I1002 11:29:36.589322 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/953fb357-7583-465b-ad75-ea86cc5a60a4-utilities\") pod \"953fb357-7583-465b-ad75-ea86cc5a60a4\" (UID: \"953fb357-7583-465b-ad75-ea86cc5a60a4\") "
Oct 02 11:29:36 crc kubenswrapper[4783]: I1002 11:29:36.589478 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kgz2v\" (UniqueName: \"kubernetes.io/projected/953fb357-7583-465b-ad75-ea86cc5a60a4-kube-api-access-kgz2v\") pod \"953fb357-7583-465b-ad75-ea86cc5a60a4\" (UID: \"953fb357-7583-465b-ad75-ea86cc5a60a4\") "
Oct 02 11:29:36 crc kubenswrapper[4783]: I1002 11:29:36.589561 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/953fb357-7583-465b-ad75-ea86cc5a60a4-catalog-content\") pod \"953fb357-7583-465b-ad75-ea86cc5a60a4\" (UID: \"953fb357-7583-465b-ad75-ea86cc5a60a4\") "
Oct 02 11:29:36 crc kubenswrapper[4783]: I1002 11:29:36.590197 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/953fb357-7583-465b-ad75-ea86cc5a60a4-utilities" (OuterVolumeSpecName: "utilities") pod "953fb357-7583-465b-ad75-ea86cc5a60a4" (UID: "953fb357-7583-465b-ad75-ea86cc5a60a4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 02 11:29:36 crc kubenswrapper[4783]: I1002 11:29:36.602086 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/953fb357-7583-465b-ad75-ea86cc5a60a4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "953fb357-7583-465b-ad75-ea86cc5a60a4" (UID: "953fb357-7583-465b-ad75-ea86cc5a60a4"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 02 11:29:36 crc kubenswrapper[4783]: I1002 11:29:36.607102 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/953fb357-7583-465b-ad75-ea86cc5a60a4-kube-api-access-kgz2v" (OuterVolumeSpecName: "kube-api-access-kgz2v") pod "953fb357-7583-465b-ad75-ea86cc5a60a4" (UID: "953fb357-7583-465b-ad75-ea86cc5a60a4"). InnerVolumeSpecName "kube-api-access-kgz2v". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 02 11:29:36 crc kubenswrapper[4783]: I1002 11:29:36.692212 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kgz2v\" (UniqueName: \"kubernetes.io/projected/953fb357-7583-465b-ad75-ea86cc5a60a4-kube-api-access-kgz2v\") on node \"crc\" DevicePath \"\""
Oct 02 11:29:36 crc kubenswrapper[4783]: I1002 11:29:36.692241 4783 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/953fb357-7583-465b-ad75-ea86cc5a60a4-catalog-content\") on node \"crc\" DevicePath \"\""
Oct 02 11:29:36 crc kubenswrapper[4783]: I1002 11:29:36.692250 4783 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/953fb357-7583-465b-ad75-ea86cc5a60a4-utilities\") on node \"crc\" DevicePath \"\""
Oct 02 11:29:36 crc kubenswrapper[4783]: I1002 11:29:36.806985 4783 generic.go:334] "Generic (PLEG): container finished" podID="953fb357-7583-465b-ad75-ea86cc5a60a4" containerID="760f26ed149b837206e8eb6092fa53b1ab7992101742ac6551177108c734dde0" exitCode=0
Oct 02 11:29:36 crc kubenswrapper[4783]: I1002 11:29:36.807029 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n5982" event={"ID":"953fb357-7583-465b-ad75-ea86cc5a60a4","Type":"ContainerDied","Data":"760f26ed149b837206e8eb6092fa53b1ab7992101742ac6551177108c734dde0"}
Oct 02 11:29:36 crc kubenswrapper[4783]: I1002 11:29:36.807066 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-n5982" event={"ID":"953fb357-7583-465b-ad75-ea86cc5a60a4","Type":"ContainerDied","Data":"145f9e0f3caca15cc0c16eee92289e9143a9d182e2beea16051805f0b29239e6"}
Oct 02 11:29:36 crc kubenswrapper[4783]: I1002 11:29:36.807089 4783 scope.go:117] "RemoveContainer" containerID="760f26ed149b837206e8eb6092fa53b1ab7992101742ac6551177108c734dde0"
Oct 02 11:29:36 crc kubenswrapper[4783]: I1002 11:29:36.807096 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-n5982"
Oct 02 11:29:36 crc kubenswrapper[4783]: I1002 11:29:36.849945 4783 scope.go:117] "RemoveContainer" containerID="1a99c78f66832933a9da368cabde6230f5b22e6b7a8c44d62880cfd379850606"
Oct 02 11:29:36 crc kubenswrapper[4783]: I1002 11:29:36.850367 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-n5982"]
Oct 02 11:29:36 crc kubenswrapper[4783]: I1002 11:29:36.862761 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-n5982"]
Oct 02 11:29:36 crc kubenswrapper[4783]: I1002 11:29:36.914507 4783 scope.go:117] "RemoveContainer" containerID="0033c823ea69f656481644a07e4b7a69b1332e633007c90e0e660bcb0d39ddb6"
Oct 02 11:29:36 crc kubenswrapper[4783]: I1002 11:29:36.935119 4783 scope.go:117] "RemoveContainer" containerID="760f26ed149b837206e8eb6092fa53b1ab7992101742ac6551177108c734dde0"
Oct 02 11:29:36 crc kubenswrapper[4783]: E1002 11:29:36.935854 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"760f26ed149b837206e8eb6092fa53b1ab7992101742ac6551177108c734dde0\": container with ID starting with 760f26ed149b837206e8eb6092fa53b1ab7992101742ac6551177108c734dde0 not found: ID does not exist" containerID="760f26ed149b837206e8eb6092fa53b1ab7992101742ac6551177108c734dde0"
Oct 02 11:29:36 crc kubenswrapper[4783]: I1002 11:29:36.935908 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"760f26ed149b837206e8eb6092fa53b1ab7992101742ac6551177108c734dde0"} err="failed to get container status \"760f26ed149b837206e8eb6092fa53b1ab7992101742ac6551177108c734dde0\": rpc error: code = NotFound desc = could not find container \"760f26ed149b837206e8eb6092fa53b1ab7992101742ac6551177108c734dde0\": container with ID starting with 760f26ed149b837206e8eb6092fa53b1ab7992101742ac6551177108c734dde0 not found: ID does not exist"
Oct 02 11:29:36 crc kubenswrapper[4783]: I1002 11:29:36.935944 4783 scope.go:117] "RemoveContainer" containerID="1a99c78f66832933a9da368cabde6230f5b22e6b7a8c44d62880cfd379850606"
Oct 02 11:29:36 crc kubenswrapper[4783]: E1002 11:29:36.936328 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1a99c78f66832933a9da368cabde6230f5b22e6b7a8c44d62880cfd379850606\": container with ID starting with 1a99c78f66832933a9da368cabde6230f5b22e6b7a8c44d62880cfd379850606 not found: ID does not exist" containerID="1a99c78f66832933a9da368cabde6230f5b22e6b7a8c44d62880cfd379850606"
Oct 02 11:29:36 crc kubenswrapper[4783]: I1002 11:29:36.936348 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1a99c78f66832933a9da368cabde6230f5b22e6b7a8c44d62880cfd379850606"} err="failed to get container status \"1a99c78f66832933a9da368cabde6230f5b22e6b7a8c44d62880cfd379850606\": rpc error: code = NotFound desc = could not find container \"1a99c78f66832933a9da368cabde6230f5b22e6b7a8c44d62880cfd379850606\": container with ID starting with 1a99c78f66832933a9da368cabde6230f5b22e6b7a8c44d62880cfd379850606 not found: ID does not exist"
Oct 02 11:29:36 crc kubenswrapper[4783]: I1002 11:29:36.936364 4783 scope.go:117] "RemoveContainer" containerID="0033c823ea69f656481644a07e4b7a69b1332e633007c90e0e660bcb0d39ddb6"
Oct 02 11:29:36 crc kubenswrapper[4783]: E1002 11:29:36.936818 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0033c823ea69f656481644a07e4b7a69b1332e633007c90e0e660bcb0d39ddb6\": container with ID starting with 0033c823ea69f656481644a07e4b7a69b1332e633007c90e0e660bcb0d39ddb6 not found: ID does not exist" containerID="0033c823ea69f656481644a07e4b7a69b1332e633007c90e0e660bcb0d39ddb6"
Oct 02 11:29:36 crc kubenswrapper[4783]: I1002 11:29:36.936848 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0033c823ea69f656481644a07e4b7a69b1332e633007c90e0e660bcb0d39ddb6"} err="failed to get container status \"0033c823ea69f656481644a07e4b7a69b1332e633007c90e0e660bcb0d39ddb6\": rpc error: code = NotFound desc = could not find container \"0033c823ea69f656481644a07e4b7a69b1332e633007c90e0e660bcb0d39ddb6\": container with ID starting with 0033c823ea69f656481644a07e4b7a69b1332e633007c90e0e660bcb0d39ddb6 not found: ID does not exist"
Oct 02 11:29:37 crc kubenswrapper[4783]: I1002 11:29:37.569143 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="953fb357-7583-465b-ad75-ea86cc5a60a4" path="/var/lib/kubelet/pods/953fb357-7583-465b-ad75-ea86cc5a60a4/volumes"
Oct 02 11:29:37 crc kubenswrapper[4783]: I1002 11:29:37.928083 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-znf25" podUID="e5d9a0b6-64c2-4a13-8f58-c671f7818ab3" containerName="registry-server" probeResult="failure" output=<
Oct 02 11:29:37 crc kubenswrapper[4783]: timeout: failed to connect service ":50051" within 1s
Oct 02 11:29:37 crc kubenswrapper[4783]: >
Oct 02 11:29:46 crc kubenswrapper[4783]: I1002 11:29:46.113186 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-hgk9l" podUID="4a537075-67c8-4620-a04b-ce3bff07d4c9" containerName="registry-server" probeResult="failure" output=<
Oct 02 11:29:46 crc kubenswrapper[4783]: timeout: failed to connect service ":50051" within 1s
Oct 02 11:29:46 crc kubenswrapper[4783]: >
Oct 02 11:29:47 crc kubenswrapper[4783]: I1002 11:29:47.910355 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-znf25" podUID="e5d9a0b6-64c2-4a13-8f58-c671f7818ab3" containerName="registry-server" probeResult="failure" output=<
Oct 02 11:29:47 crc kubenswrapper[4783]: timeout: failed to connect service ":50051" within 1s
Oct 02 11:29:47 crc kubenswrapper[4783]: >
Oct 02 11:29:51 crc kubenswrapper[4783]: I1002 11:29:51.513771 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Oct 02 11:29:51 crc kubenswrapper[4783]: I1002 11:29:51.514304 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 02 11:29:56 crc kubenswrapper[4783]: I1002 11:29:56.004516 4783 scope.go:117] "RemoveContainer" containerID="ca2acf093e1a10a32251148a745d912fd1e659a9eb64dd8fa37e8064ca23c6c3"
Oct 02 11:29:56 crc kubenswrapper[4783]: I1002 11:29:56.114302 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-hgk9l" podUID="4a537075-67c8-4620-a04b-ce3bff07d4c9" containerName="registry-server" probeResult="failure" output=<
Oct 02 11:29:56 crc kubenswrapper[4783]: timeout: failed to connect service ":50051" within 1s
Oct 02 11:29:56 crc kubenswrapper[4783]: >
Oct 02 11:29:56 crc kubenswrapper[4783]: I1002 11:29:56.912483 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-znf25"
Oct 02 11:29:56 crc kubenswrapper[4783]: I1002 11:29:56.967764 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-znf25"
Oct 02 11:29:57 crc kubenswrapper[4783]: I1002 11:29:57.154038 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-znf25"]
Oct 02 11:29:58 crc kubenswrapper[4783]: I1002 11:29:58.003859 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-znf25" podUID="e5d9a0b6-64c2-4a13-8f58-c671f7818ab3" containerName="registry-server" containerID="cri-o://18c09ee401a38a828ceb21160784e6a34a49be5eabc33c8e0b3ad610e72f71f5" gracePeriod=2
Oct 02 11:29:58 crc kubenswrapper[4783]: I1002 11:29:58.945025 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-znf25"
Oct 02 11:29:59 crc kubenswrapper[4783]: I1002 11:29:59.013742 4783 generic.go:334] "Generic (PLEG): container finished" podID="e5d9a0b6-64c2-4a13-8f58-c671f7818ab3" containerID="18c09ee401a38a828ceb21160784e6a34a49be5eabc33c8e0b3ad610e72f71f5" exitCode=0
Oct 02 11:29:59 crc kubenswrapper[4783]: I1002 11:29:59.013815 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-znf25" event={"ID":"e5d9a0b6-64c2-4a13-8f58-c671f7818ab3","Type":"ContainerDied","Data":"18c09ee401a38a828ceb21160784e6a34a49be5eabc33c8e0b3ad610e72f71f5"}
Oct 02 11:29:59 crc kubenswrapper[4783]: I1002 11:29:59.013827 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-znf25"
Oct 02 11:29:59 crc kubenswrapper[4783]: I1002 11:29:59.013871 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-znf25" event={"ID":"e5d9a0b6-64c2-4a13-8f58-c671f7818ab3","Type":"ContainerDied","Data":"654175d6657daf528c4d54ceabfbfebbfa5c8e30f2ffe5c02a85a6118b6073e9"}
Oct 02 11:29:59 crc kubenswrapper[4783]: I1002 11:29:59.013892 4783 scope.go:117] "RemoveContainer" containerID="18c09ee401a38a828ceb21160784e6a34a49be5eabc33c8e0b3ad610e72f71f5"
Oct 02 11:29:59 crc kubenswrapper[4783]: I1002 11:29:59.021953 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d8zm8\" (UniqueName: \"kubernetes.io/projected/e5d9a0b6-64c2-4a13-8f58-c671f7818ab3-kube-api-access-d8zm8\") pod \"e5d9a0b6-64c2-4a13-8f58-c671f7818ab3\" (UID: \"e5d9a0b6-64c2-4a13-8f58-c671f7818ab3\") "
Oct 02 11:29:59 crc kubenswrapper[4783]: I1002 11:29:59.022087 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5d9a0b6-64c2-4a13-8f58-c671f7818ab3-utilities\") pod \"e5d9a0b6-64c2-4a13-8f58-c671f7818ab3\" (UID: \"e5d9a0b6-64c2-4a13-8f58-c671f7818ab3\") "
Oct 02 11:29:59 crc kubenswrapper[4783]: I1002 11:29:59.022265 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5d9a0b6-64c2-4a13-8f58-c671f7818ab3-catalog-content\") pod \"e5d9a0b6-64c2-4a13-8f58-c671f7818ab3\" (UID: \"e5d9a0b6-64c2-4a13-8f58-c671f7818ab3\") "
Oct 02 11:29:59 crc kubenswrapper[4783]: I1002 11:29:59.028564 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e5d9a0b6-64c2-4a13-8f58-c671f7818ab3-utilities" (OuterVolumeSpecName: "utilities") pod "e5d9a0b6-64c2-4a13-8f58-c671f7818ab3" (UID: "e5d9a0b6-64c2-4a13-8f58-c671f7818ab3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 02 11:29:59 crc kubenswrapper[4783]: I1002 11:29:59.029186 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e5d9a0b6-64c2-4a13-8f58-c671f7818ab3-kube-api-access-d8zm8" (OuterVolumeSpecName: "kube-api-access-d8zm8") pod "e5d9a0b6-64c2-4a13-8f58-c671f7818ab3" (UID: "e5d9a0b6-64c2-4a13-8f58-c671f7818ab3"). InnerVolumeSpecName "kube-api-access-d8zm8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 02 11:29:59 crc kubenswrapper[4783]: I1002 11:29:59.043190 4783 scope.go:117] "RemoveContainer" containerID="64697afe1d91892df953d71290c8d224b0d0e248829c00291e5e20ae043cd1a9"
Oct 02 11:29:59 crc kubenswrapper[4783]: I1002 11:29:59.084317 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e5d9a0b6-64c2-4a13-8f58-c671f7818ab3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e5d9a0b6-64c2-4a13-8f58-c671f7818ab3" (UID: "e5d9a0b6-64c2-4a13-8f58-c671f7818ab3"). InnerVolumeSpecName "catalog-content".
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:29:59 crc kubenswrapper[4783]: I1002 11:29:59.119008 4783 scope.go:117] "RemoveContainer" containerID="8294607b3d05c13ef5d2193d7475a1d3a2210a05ff0da99b6e412f5d2ea1dee2" Oct 02 11:29:59 crc kubenswrapper[4783]: I1002 11:29:59.124393 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d8zm8\" (UniqueName: \"kubernetes.io/projected/e5d9a0b6-64c2-4a13-8f58-c671f7818ab3-kube-api-access-d8zm8\") on node \"crc\" DevicePath \"\"" Oct 02 11:29:59 crc kubenswrapper[4783]: I1002 11:29:59.124513 4783 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5d9a0b6-64c2-4a13-8f58-c671f7818ab3-utilities\") on node \"crc\" DevicePath \"\"" Oct 02 11:29:59 crc kubenswrapper[4783]: I1002 11:29:59.124528 4783 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5d9a0b6-64c2-4a13-8f58-c671f7818ab3-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 02 11:29:59 crc kubenswrapper[4783]: I1002 11:29:59.154830 4783 scope.go:117] "RemoveContainer" containerID="18c09ee401a38a828ceb21160784e6a34a49be5eabc33c8e0b3ad610e72f71f5" Oct 02 11:29:59 crc kubenswrapper[4783]: E1002 11:29:59.155390 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"18c09ee401a38a828ceb21160784e6a34a49be5eabc33c8e0b3ad610e72f71f5\": container with ID starting with 18c09ee401a38a828ceb21160784e6a34a49be5eabc33c8e0b3ad610e72f71f5 not found: ID does not exist" containerID="18c09ee401a38a828ceb21160784e6a34a49be5eabc33c8e0b3ad610e72f71f5" Oct 02 11:29:59 crc kubenswrapper[4783]: I1002 11:29:59.155501 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"18c09ee401a38a828ceb21160784e6a34a49be5eabc33c8e0b3ad610e72f71f5"} err="failed to get container status \"18c09ee401a38a828ceb21160784e6a34a49be5eabc33c8e0b3ad610e72f71f5\": rpc error: code = NotFound desc = could not find container \"18c09ee401a38a828ceb21160784e6a34a49be5eabc33c8e0b3ad610e72f71f5\": container with ID starting with 18c09ee401a38a828ceb21160784e6a34a49be5eabc33c8e0b3ad610e72f71f5 not found: ID does not exist" Oct 02 11:29:59 crc kubenswrapper[4783]: I1002 11:29:59.155551 4783 scope.go:117] "RemoveContainer" containerID="64697afe1d91892df953d71290c8d224b0d0e248829c00291e5e20ae043cd1a9" Oct 02 11:29:59 crc kubenswrapper[4783]: E1002 11:29:59.155981 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"64697afe1d91892df953d71290c8d224b0d0e248829c00291e5e20ae043cd1a9\": container with ID starting with 64697afe1d91892df953d71290c8d224b0d0e248829c00291e5e20ae043cd1a9 not found: ID does not exist" containerID="64697afe1d91892df953d71290c8d224b0d0e248829c00291e5e20ae043cd1a9" Oct 02 11:29:59 crc kubenswrapper[4783]: I1002 11:29:59.156029 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"64697afe1d91892df953d71290c8d224b0d0e248829c00291e5e20ae043cd1a9"} err="failed to get container status \"64697afe1d91892df953d71290c8d224b0d0e248829c00291e5e20ae043cd1a9\": rpc error: code = NotFound desc = could not find container \"64697afe1d91892df953d71290c8d224b0d0e248829c00291e5e20ae043cd1a9\": container with ID starting with 64697afe1d91892df953d71290c8d224b0d0e248829c00291e5e20ae043cd1a9 not found: ID does not exist" Oct 02 11:29:59 crc 
kubenswrapper[4783]: I1002 11:29:59.156048 4783 scope.go:117] "RemoveContainer" containerID="8294607b3d05c13ef5d2193d7475a1d3a2210a05ff0da99b6e412f5d2ea1dee2" Oct 02 11:29:59 crc kubenswrapper[4783]: E1002 11:29:59.156300 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8294607b3d05c13ef5d2193d7475a1d3a2210a05ff0da99b6e412f5d2ea1dee2\": container with ID starting with 8294607b3d05c13ef5d2193d7475a1d3a2210a05ff0da99b6e412f5d2ea1dee2 not found: ID does not exist" containerID="8294607b3d05c13ef5d2193d7475a1d3a2210a05ff0da99b6e412f5d2ea1dee2" Oct 02 11:29:59 crc kubenswrapper[4783]: I1002 11:29:59.156331 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8294607b3d05c13ef5d2193d7475a1d3a2210a05ff0da99b6e412f5d2ea1dee2"} err="failed to get container status \"8294607b3d05c13ef5d2193d7475a1d3a2210a05ff0da99b6e412f5d2ea1dee2\": rpc error: code = NotFound desc = could not find container \"8294607b3d05c13ef5d2193d7475a1d3a2210a05ff0da99b6e412f5d2ea1dee2\": container with ID starting with 8294607b3d05c13ef5d2193d7475a1d3a2210a05ff0da99b6e412f5d2ea1dee2 not found: ID does not exist" Oct 02 11:29:59 crc kubenswrapper[4783]: I1002 11:29:59.355889 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-znf25"] Oct 02 11:29:59 crc kubenswrapper[4783]: I1002 11:29:59.364641 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-znf25"] Oct 02 11:29:59 crc kubenswrapper[4783]: I1002 11:29:59.559180 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e5d9a0b6-64c2-4a13-8f58-c671f7818ab3" path="/var/lib/kubelet/pods/e5d9a0b6-64c2-4a13-8f58-c671f7818ab3/volumes" Oct 02 11:30:00 crc kubenswrapper[4783]: I1002 11:30:00.160375 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29323410-6skmf"] Oct 02 11:30:00 crc kubenswrapper[4783]: E1002 11:30:00.160907 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5d9a0b6-64c2-4a13-8f58-c671f7818ab3" containerName="extract-utilities" Oct 02 11:30:00 crc kubenswrapper[4783]: I1002 11:30:00.160926 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5d9a0b6-64c2-4a13-8f58-c671f7818ab3" containerName="extract-utilities" Oct 02 11:30:00 crc kubenswrapper[4783]: E1002 11:30:00.160954 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="953fb357-7583-465b-ad75-ea86cc5a60a4" containerName="extract-content" Oct 02 11:30:00 crc kubenswrapper[4783]: I1002 11:30:00.160963 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="953fb357-7583-465b-ad75-ea86cc5a60a4" containerName="extract-content" Oct 02 11:30:00 crc kubenswrapper[4783]: E1002 11:30:00.160993 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="953fb357-7583-465b-ad75-ea86cc5a60a4" containerName="extract-utilities" Oct 02 11:30:00 crc kubenswrapper[4783]: I1002 11:30:00.161002 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="953fb357-7583-465b-ad75-ea86cc5a60a4" containerName="extract-utilities" Oct 02 11:30:00 crc kubenswrapper[4783]: E1002 11:30:00.161016 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5d9a0b6-64c2-4a13-8f58-c671f7818ab3" containerName="extract-content" Oct 02 11:30:00 crc kubenswrapper[4783]: I1002 11:30:00.161024 4783 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="e5d9a0b6-64c2-4a13-8f58-c671f7818ab3" containerName="extract-content" Oct 02 11:30:00 crc kubenswrapper[4783]: E1002 11:30:00.161036 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="953fb357-7583-465b-ad75-ea86cc5a60a4" containerName="registry-server" Oct 02 11:30:00 crc kubenswrapper[4783]: I1002 11:30:00.161043 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="953fb357-7583-465b-ad75-ea86cc5a60a4" containerName="registry-server" Oct 02 11:30:00 crc kubenswrapper[4783]: E1002 11:30:00.161064 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5d9a0b6-64c2-4a13-8f58-c671f7818ab3" containerName="registry-server" Oct 02 11:30:00 crc kubenswrapper[4783]: I1002 11:30:00.161077 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5d9a0b6-64c2-4a13-8f58-c671f7818ab3" containerName="registry-server" Oct 02 11:30:00 crc kubenswrapper[4783]: I1002 11:30:00.161321 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="e5d9a0b6-64c2-4a13-8f58-c671f7818ab3" containerName="registry-server" Oct 02 11:30:00 crc kubenswrapper[4783]: I1002 11:30:00.161346 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="953fb357-7583-465b-ad75-ea86cc5a60a4" containerName="registry-server" Oct 02 11:30:00 crc kubenswrapper[4783]: I1002 11:30:00.162180 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29323410-6skmf" Oct 02 11:30:00 crc kubenswrapper[4783]: I1002 11:30:00.169209 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Oct 02 11:30:00 crc kubenswrapper[4783]: I1002 11:30:00.170765 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29323410-6skmf"] Oct 02 11:30:00 crc kubenswrapper[4783]: I1002 11:30:00.172967 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Oct 02 11:30:00 crc kubenswrapper[4783]: I1002 11:30:00.246991 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/00976817-6948-47a6-9a09-1aa2c5c96052-secret-volume\") pod \"collect-profiles-29323410-6skmf\" (UID: \"00976817-6948-47a6-9a09-1aa2c5c96052\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323410-6skmf" Oct 02 11:30:00 crc kubenswrapper[4783]: I1002 11:30:00.247259 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/00976817-6948-47a6-9a09-1aa2c5c96052-config-volume\") pod \"collect-profiles-29323410-6skmf\" (UID: \"00976817-6948-47a6-9a09-1aa2c5c96052\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323410-6skmf" Oct 02 11:30:00 crc kubenswrapper[4783]: I1002 11:30:00.247380 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qbr9b\" (UniqueName: \"kubernetes.io/projected/00976817-6948-47a6-9a09-1aa2c5c96052-kube-api-access-qbr9b\") pod \"collect-profiles-29323410-6skmf\" (UID: \"00976817-6948-47a6-9a09-1aa2c5c96052\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323410-6skmf" Oct 02 11:30:00 crc kubenswrapper[4783]: I1002 11:30:00.349163 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/00976817-6948-47a6-9a09-1aa2c5c96052-secret-volume\") pod \"collect-profiles-29323410-6skmf\" (UID: \"00976817-6948-47a6-9a09-1aa2c5c96052\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323410-6skmf" Oct 02 11:30:00 crc kubenswrapper[4783]: I1002 11:30:00.349243 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/00976817-6948-47a6-9a09-1aa2c5c96052-config-volume\") pod \"collect-profiles-29323410-6skmf\" (UID: \"00976817-6948-47a6-9a09-1aa2c5c96052\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323410-6skmf" Oct 02 11:30:00 crc kubenswrapper[4783]: I1002 11:30:00.349272 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qbr9b\" (UniqueName: \"kubernetes.io/projected/00976817-6948-47a6-9a09-1aa2c5c96052-kube-api-access-qbr9b\") pod \"collect-profiles-29323410-6skmf\" (UID: \"00976817-6948-47a6-9a09-1aa2c5c96052\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323410-6skmf" Oct 02 11:30:00 crc kubenswrapper[4783]: I1002 11:30:00.350310 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/00976817-6948-47a6-9a09-1aa2c5c96052-config-volume\") pod \"collect-profiles-29323410-6skmf\" (UID: \"00976817-6948-47a6-9a09-1aa2c5c96052\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323410-6skmf" Oct 02 11:30:00 crc kubenswrapper[4783]: I1002 11:30:00.353748 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/00976817-6948-47a6-9a09-1aa2c5c96052-secret-volume\") pod \"collect-profiles-29323410-6skmf\" (UID: \"00976817-6948-47a6-9a09-1aa2c5c96052\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323410-6skmf" Oct 02 11:30:00 crc kubenswrapper[4783]: I1002 11:30:00.367599 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qbr9b\" (UniqueName: \"kubernetes.io/projected/00976817-6948-47a6-9a09-1aa2c5c96052-kube-api-access-qbr9b\") pod \"collect-profiles-29323410-6skmf\" (UID: \"00976817-6948-47a6-9a09-1aa2c5c96052\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323410-6skmf" Oct 02 11:30:00 crc kubenswrapper[4783]: I1002 11:30:00.481425 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29323410-6skmf" Oct 02 11:30:00 crc kubenswrapper[4783]: I1002 11:30:00.992302 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29323410-6skmf"] Oct 02 11:30:01 crc kubenswrapper[4783]: I1002 11:30:01.035149 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29323410-6skmf" event={"ID":"00976817-6948-47a6-9a09-1aa2c5c96052","Type":"ContainerStarted","Data":"ad4c7c51448c49ff96d38f6a1bd95811dcf8b40204a3231190d37726921d2dab"} Oct 02 11:30:02 crc kubenswrapper[4783]: I1002 11:30:02.046387 4783 generic.go:334] "Generic (PLEG): container finished" podID="00976817-6948-47a6-9a09-1aa2c5c96052" containerID="ad81ccbdb85c148d0039bc7ecfc41cb9d704f30e17a6b3d4e8c3a51fc1d92375" exitCode=0 Oct 02 11:30:02 crc kubenswrapper[4783]: I1002 11:30:02.046459 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29323410-6skmf" event={"ID":"00976817-6948-47a6-9a09-1aa2c5c96052","Type":"ContainerDied","Data":"ad81ccbdb85c148d0039bc7ecfc41cb9d704f30e17a6b3d4e8c3a51fc1d92375"} Oct 02 11:30:03 crc kubenswrapper[4783]: I1002 11:30:03.412881 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29323410-6skmf" Oct 02 11:30:03 crc kubenswrapper[4783]: I1002 11:30:03.519414 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/00976817-6948-47a6-9a09-1aa2c5c96052-secret-volume\") pod \"00976817-6948-47a6-9a09-1aa2c5c96052\" (UID: \"00976817-6948-47a6-9a09-1aa2c5c96052\") " Oct 02 11:30:03 crc kubenswrapper[4783]: I1002 11:30:03.519654 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/00976817-6948-47a6-9a09-1aa2c5c96052-config-volume\") pod \"00976817-6948-47a6-9a09-1aa2c5c96052\" (UID: \"00976817-6948-47a6-9a09-1aa2c5c96052\") " Oct 02 11:30:03 crc kubenswrapper[4783]: I1002 11:30:03.519686 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qbr9b\" (UniqueName: \"kubernetes.io/projected/00976817-6948-47a6-9a09-1aa2c5c96052-kube-api-access-qbr9b\") pod \"00976817-6948-47a6-9a09-1aa2c5c96052\" (UID: \"00976817-6948-47a6-9a09-1aa2c5c96052\") " Oct 02 11:30:03 crc kubenswrapper[4783]: I1002 11:30:03.520464 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/00976817-6948-47a6-9a09-1aa2c5c96052-config-volume" (OuterVolumeSpecName: "config-volume") pod "00976817-6948-47a6-9a09-1aa2c5c96052" (UID: "00976817-6948-47a6-9a09-1aa2c5c96052"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:30:03 crc kubenswrapper[4783]: I1002 11:30:03.525479 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/00976817-6948-47a6-9a09-1aa2c5c96052-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "00976817-6948-47a6-9a09-1aa2c5c96052" (UID: "00976817-6948-47a6-9a09-1aa2c5c96052"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:30:03 crc kubenswrapper[4783]: I1002 11:30:03.525641 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/00976817-6948-47a6-9a09-1aa2c5c96052-kube-api-access-qbr9b" (OuterVolumeSpecName: "kube-api-access-qbr9b") pod "00976817-6948-47a6-9a09-1aa2c5c96052" (UID: "00976817-6948-47a6-9a09-1aa2c5c96052"). InnerVolumeSpecName "kube-api-access-qbr9b". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:30:03 crc kubenswrapper[4783]: I1002 11:30:03.622670 4783 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/00976817-6948-47a6-9a09-1aa2c5c96052-secret-volume\") on node \"crc\" DevicePath \"\"" Oct 02 11:30:03 crc kubenswrapper[4783]: I1002 11:30:03.623068 4783 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/00976817-6948-47a6-9a09-1aa2c5c96052-config-volume\") on node \"crc\" DevicePath \"\"" Oct 02 11:30:03 crc kubenswrapper[4783]: I1002 11:30:03.623083 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qbr9b\" (UniqueName: \"kubernetes.io/projected/00976817-6948-47a6-9a09-1aa2c5c96052-kube-api-access-qbr9b\") on node \"crc\" DevicePath \"\"" Oct 02 11:30:04 crc kubenswrapper[4783]: I1002 11:30:04.064968 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29323410-6skmf" event={"ID":"00976817-6948-47a6-9a09-1aa2c5c96052","Type":"ContainerDied","Data":"ad4c7c51448c49ff96d38f6a1bd95811dcf8b40204a3231190d37726921d2dab"} Oct 02 11:30:04 crc kubenswrapper[4783]: I1002 11:30:04.065007 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ad4c7c51448c49ff96d38f6a1bd95811dcf8b40204a3231190d37726921d2dab" Oct 02 11:30:04 crc kubenswrapper[4783]: I1002 11:30:04.065020 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29323410-6skmf" Oct 02 11:30:04 crc kubenswrapper[4783]: I1002 11:30:04.494760 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29323365-465ld"] Oct 02 11:30:04 crc kubenswrapper[4783]: I1002 11:30:04.502979 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29323365-465ld"] Oct 02 11:30:05 crc kubenswrapper[4783]: I1002 11:30:05.118250 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-hgk9l" Oct 02 11:30:05 crc kubenswrapper[4783]: I1002 11:30:05.175917 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-hgk9l" Oct 02 11:30:05 crc kubenswrapper[4783]: I1002 11:30:05.356377 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-hgk9l"] Oct 02 11:30:05 crc kubenswrapper[4783]: I1002 11:30:05.558242 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="17c756cd-c7ba-4efa-850d-7a9aff74099d" path="/var/lib/kubelet/pods/17c756cd-c7ba-4efa-850d-7a9aff74099d/volumes" Oct 02 11:30:07 crc kubenswrapper[4783]: I1002 11:30:07.090765 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-hgk9l" podUID="4a537075-67c8-4620-a04b-ce3bff07d4c9" containerName="registry-server" containerID="cri-o://c1249dcf7804412da3509548da1a3af27f440ee1aee27e002f642621c50c5a63" gracePeriod=2 Oct 02 11:30:07 crc kubenswrapper[4783]: I1002 11:30:07.569571 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-hgk9l" Oct 02 11:30:07 crc kubenswrapper[4783]: I1002 11:30:07.711841 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a537075-67c8-4620-a04b-ce3bff07d4c9-catalog-content\") pod \"4a537075-67c8-4620-a04b-ce3bff07d4c9\" (UID: \"4a537075-67c8-4620-a04b-ce3bff07d4c9\") " Oct 02 11:30:07 crc kubenswrapper[4783]: I1002 11:30:07.711945 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a537075-67c8-4620-a04b-ce3bff07d4c9-utilities\") pod \"4a537075-67c8-4620-a04b-ce3bff07d4c9\" (UID: \"4a537075-67c8-4620-a04b-ce3bff07d4c9\") " Oct 02 11:30:07 crc kubenswrapper[4783]: I1002 11:30:07.712051 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n49zh\" (UniqueName: \"kubernetes.io/projected/4a537075-67c8-4620-a04b-ce3bff07d4c9-kube-api-access-n49zh\") pod \"4a537075-67c8-4620-a04b-ce3bff07d4c9\" (UID: \"4a537075-67c8-4620-a04b-ce3bff07d4c9\") " Oct 02 11:30:07 crc kubenswrapper[4783]: I1002 11:30:07.713324 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4a537075-67c8-4620-a04b-ce3bff07d4c9-utilities" (OuterVolumeSpecName: "utilities") pod "4a537075-67c8-4620-a04b-ce3bff07d4c9" (UID: "4a537075-67c8-4620-a04b-ce3bff07d4c9"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:30:07 crc kubenswrapper[4783]: I1002 11:30:07.717572 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4a537075-67c8-4620-a04b-ce3bff07d4c9-kube-api-access-n49zh" (OuterVolumeSpecName: "kube-api-access-n49zh") pod "4a537075-67c8-4620-a04b-ce3bff07d4c9" (UID: "4a537075-67c8-4620-a04b-ce3bff07d4c9"). InnerVolumeSpecName "kube-api-access-n49zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:30:07 crc kubenswrapper[4783]: I1002 11:30:07.806761 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4a537075-67c8-4620-a04b-ce3bff07d4c9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4a537075-67c8-4620-a04b-ce3bff07d4c9" (UID: "4a537075-67c8-4620-a04b-ce3bff07d4c9"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:30:07 crc kubenswrapper[4783]: I1002 11:30:07.814238 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n49zh\" (UniqueName: \"kubernetes.io/projected/4a537075-67c8-4620-a04b-ce3bff07d4c9-kube-api-access-n49zh\") on node \"crc\" DevicePath \"\"" Oct 02 11:30:07 crc kubenswrapper[4783]: I1002 11:30:07.814473 4783 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4a537075-67c8-4620-a04b-ce3bff07d4c9-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 02 11:30:07 crc kubenswrapper[4783]: I1002 11:30:07.814536 4783 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4a537075-67c8-4620-a04b-ce3bff07d4c9-utilities\") on node \"crc\" DevicePath \"\"" Oct 02 11:30:08 crc kubenswrapper[4783]: I1002 11:30:08.100615 4783 generic.go:334] "Generic (PLEG): container finished" podID="4a537075-67c8-4620-a04b-ce3bff07d4c9" containerID="c1249dcf7804412da3509548da1a3af27f440ee1aee27e002f642621c50c5a63" exitCode=0 Oct 02 11:30:08 crc kubenswrapper[4783]: I1002 11:30:08.100676 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-hgk9l" Oct 02 11:30:08 crc kubenswrapper[4783]: I1002 11:30:08.100701 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hgk9l" event={"ID":"4a537075-67c8-4620-a04b-ce3bff07d4c9","Type":"ContainerDied","Data":"c1249dcf7804412da3509548da1a3af27f440ee1aee27e002f642621c50c5a63"} Oct 02 11:30:08 crc kubenswrapper[4783]: I1002 11:30:08.100737 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-hgk9l" event={"ID":"4a537075-67c8-4620-a04b-ce3bff07d4c9","Type":"ContainerDied","Data":"9268e768295510119a6e850dde3eb87fdb61881b33b5a2677edd66be80e7a3c2"} Oct 02 11:30:08 crc kubenswrapper[4783]: I1002 11:30:08.100758 4783 scope.go:117] "RemoveContainer" containerID="c1249dcf7804412da3509548da1a3af27f440ee1aee27e002f642621c50c5a63" Oct 02 11:30:08 crc kubenswrapper[4783]: I1002 11:30:08.145000 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-hgk9l"] Oct 02 11:30:08 crc kubenswrapper[4783]: I1002 11:30:08.146531 4783 scope.go:117] "RemoveContainer" containerID="2153a0f1fe12989eaab0945e4fec9d75cb692a0a506cd6d4331f4dd352bd3ec1" Oct 02 11:30:08 crc kubenswrapper[4783]: I1002 11:30:08.153371 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-hgk9l"] Oct 02 11:30:08 crc kubenswrapper[4783]: I1002 11:30:08.168792 4783 scope.go:117] "RemoveContainer" containerID="3d13babbc255adbfc16d131af2846fa4a3b11726699a1cfc625adc6f03be59a0" Oct 02 11:30:08 crc kubenswrapper[4783]: I1002 11:30:08.218046 4783 scope.go:117] "RemoveContainer" containerID="c1249dcf7804412da3509548da1a3af27f440ee1aee27e002f642621c50c5a63" Oct 02 11:30:08 crc kubenswrapper[4783]: E1002 11:30:08.218492 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c1249dcf7804412da3509548da1a3af27f440ee1aee27e002f642621c50c5a63\": container with ID starting with c1249dcf7804412da3509548da1a3af27f440ee1aee27e002f642621c50c5a63 not found: ID does not exist" containerID="c1249dcf7804412da3509548da1a3af27f440ee1aee27e002f642621c50c5a63" Oct 02 11:30:08 crc kubenswrapper[4783]: I1002 11:30:08.218525 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c1249dcf7804412da3509548da1a3af27f440ee1aee27e002f642621c50c5a63"} err="failed to get container status \"c1249dcf7804412da3509548da1a3af27f440ee1aee27e002f642621c50c5a63\": rpc error: code = NotFound desc = could not find container \"c1249dcf7804412da3509548da1a3af27f440ee1aee27e002f642621c50c5a63\": container with ID starting with c1249dcf7804412da3509548da1a3af27f440ee1aee27e002f642621c50c5a63 not found: ID does not exist" Oct 02 11:30:08 crc kubenswrapper[4783]: I1002 11:30:08.218573 4783 scope.go:117] "RemoveContainer" containerID="2153a0f1fe12989eaab0945e4fec9d75cb692a0a506cd6d4331f4dd352bd3ec1" Oct 02 11:30:08 crc kubenswrapper[4783]: E1002 11:30:08.219060 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2153a0f1fe12989eaab0945e4fec9d75cb692a0a506cd6d4331f4dd352bd3ec1\": container with ID starting with 2153a0f1fe12989eaab0945e4fec9d75cb692a0a506cd6d4331f4dd352bd3ec1 not found: ID does not exist" containerID="2153a0f1fe12989eaab0945e4fec9d75cb692a0a506cd6d4331f4dd352bd3ec1" Oct 02 11:30:08 crc kubenswrapper[4783]: I1002 11:30:08.219105 4783 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2153a0f1fe12989eaab0945e4fec9d75cb692a0a506cd6d4331f4dd352bd3ec1"} err="failed to get container status \"2153a0f1fe12989eaab0945e4fec9d75cb692a0a506cd6d4331f4dd352bd3ec1\": rpc error: code = NotFound desc = could not find container \"2153a0f1fe12989eaab0945e4fec9d75cb692a0a506cd6d4331f4dd352bd3ec1\": container with ID starting with 2153a0f1fe12989eaab0945e4fec9d75cb692a0a506cd6d4331f4dd352bd3ec1 not found: ID does not exist" Oct 02 11:30:08 crc kubenswrapper[4783]: I1002 11:30:08.219129 4783 scope.go:117] "RemoveContainer" containerID="3d13babbc255adbfc16d131af2846fa4a3b11726699a1cfc625adc6f03be59a0" Oct 02 11:30:08 crc kubenswrapper[4783]: E1002 11:30:08.219530 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3d13babbc255adbfc16d131af2846fa4a3b11726699a1cfc625adc6f03be59a0\": container with ID starting with 3d13babbc255adbfc16d131af2846fa4a3b11726699a1cfc625adc6f03be59a0 not found: ID does not exist" containerID="3d13babbc255adbfc16d131af2846fa4a3b11726699a1cfc625adc6f03be59a0" Oct 02 11:30:08 crc kubenswrapper[4783]: I1002 11:30:08.219557 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3d13babbc255adbfc16d131af2846fa4a3b11726699a1cfc625adc6f03be59a0"} err="failed to get container status \"3d13babbc255adbfc16d131af2846fa4a3b11726699a1cfc625adc6f03be59a0\": rpc error: code = NotFound desc = could not find container \"3d13babbc255adbfc16d131af2846fa4a3b11726699a1cfc625adc6f03be59a0\": container with ID starting with 3d13babbc255adbfc16d131af2846fa4a3b11726699a1cfc625adc6f03be59a0 not found: ID does not exist" Oct 02 11:30:09 crc kubenswrapper[4783]: I1002 11:30:09.558584 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4a537075-67c8-4620-a04b-ce3bff07d4c9" path="/var/lib/kubelet/pods/4a537075-67c8-4620-a04b-ce3bff07d4c9/volumes" Oct 02 11:30:21 crc kubenswrapper[4783]: I1002 11:30:21.224939 4783 generic.go:334] "Generic (PLEG): container finished" podID="29cd1f95-5f6b-451a-81a4-d78e56e04c43" containerID="c14993878e51a2eb7cce083d1877c4ebd79da571fc4ee55cb87a58a1720b6620" exitCode=2 Oct 02 11:30:21 crc kubenswrapper[4783]: I1002 11:30:21.225034 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-rwwwd" event={"ID":"29cd1f95-5f6b-451a-81a4-d78e56e04c43","Type":"ContainerDied","Data":"c14993878e51a2eb7cce083d1877c4ebd79da571fc4ee55cb87a58a1720b6620"} Oct 02 11:30:21 crc kubenswrapper[4783]: I1002 11:30:21.513068 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 02 11:30:21 crc kubenswrapper[4783]: I1002 11:30:21.513129 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 02 11:30:21 crc kubenswrapper[4783]: I1002 11:30:21.513170 4783 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" 
pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" Oct 02 11:30:21 crc kubenswrapper[4783]: I1002 11:30:21.513845 4783 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5f41be414b60ced507542f945d0dbbae4870dae2d8ee2c71db9491f9f0b17cfa"} pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 02 11:30:21 crc kubenswrapper[4783]: I1002 11:30:21.513903 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" containerID="cri-o://5f41be414b60ced507542f945d0dbbae4870dae2d8ee2c71db9491f9f0b17cfa" gracePeriod=600 Oct 02 11:30:22 crc kubenswrapper[4783]: E1002 11:30:22.179854 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:30:22 crc kubenswrapper[4783]: I1002 11:30:22.235017 4783 generic.go:334] "Generic (PLEG): container finished" podID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerID="5f41be414b60ced507542f945d0dbbae4870dae2d8ee2c71db9491f9f0b17cfa" exitCode=0 Oct 02 11:30:22 crc kubenswrapper[4783]: I1002 11:30:22.235100 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" event={"ID":"3288cc82-59a8-408e-8b0e-b5255882b4fb","Type":"ContainerDied","Data":"5f41be414b60ced507542f945d0dbbae4870dae2d8ee2c71db9491f9f0b17cfa"} Oct 02 11:30:22 crc kubenswrapper[4783]: I1002 11:30:22.235469 4783 scope.go:117] "RemoveContainer" containerID="4273188ae108ec0f85de3e8dc29d684f9057cc5d4a801982cba2234f8872c085" Oct 02 11:30:22 crc kubenswrapper[4783]: I1002 11:30:22.236085 4783 scope.go:117] "RemoveContainer" containerID="5f41be414b60ced507542f945d0dbbae4870dae2d8ee2c71db9491f9f0b17cfa" Oct 02 11:30:22 crc kubenswrapper[4783]: E1002 11:30:22.236371 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:30:22 crc kubenswrapper[4783]: I1002 11:30:22.722716 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-rwwwd" Oct 02 11:30:22 crc kubenswrapper[4783]: I1002 11:30:22.912507 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/29cd1f95-5f6b-451a-81a4-d78e56e04c43-inventory\") pod \"29cd1f95-5f6b-451a-81a4-d78e56e04c43\" (UID: \"29cd1f95-5f6b-451a-81a4-d78e56e04c43\") " Oct 02 11:30:22 crc kubenswrapper[4783]: I1002 11:30:22.912863 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ch65w\" (UniqueName: \"kubernetes.io/projected/29cd1f95-5f6b-451a-81a4-d78e56e04c43-kube-api-access-ch65w\") pod \"29cd1f95-5f6b-451a-81a4-d78e56e04c43\" (UID: \"29cd1f95-5f6b-451a-81a4-d78e56e04c43\") " Oct 02 11:30:22 crc kubenswrapper[4783]: I1002 11:30:22.912952 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/29cd1f95-5f6b-451a-81a4-d78e56e04c43-ssh-key\") pod \"29cd1f95-5f6b-451a-81a4-d78e56e04c43\" (UID: \"29cd1f95-5f6b-451a-81a4-d78e56e04c43\") " Oct 02 11:30:22 crc kubenswrapper[4783]: I1002 11:30:22.919885 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29cd1f95-5f6b-451a-81a4-d78e56e04c43-kube-api-access-ch65w" (OuterVolumeSpecName: "kube-api-access-ch65w") pod "29cd1f95-5f6b-451a-81a4-d78e56e04c43" (UID: "29cd1f95-5f6b-451a-81a4-d78e56e04c43"). InnerVolumeSpecName "kube-api-access-ch65w". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:30:22 crc kubenswrapper[4783]: I1002 11:30:22.948163 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29cd1f95-5f6b-451a-81a4-d78e56e04c43-inventory" (OuterVolumeSpecName: "inventory") pod "29cd1f95-5f6b-451a-81a4-d78e56e04c43" (UID: "29cd1f95-5f6b-451a-81a4-d78e56e04c43"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:30:22 crc kubenswrapper[4783]: I1002 11:30:22.952246 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/29cd1f95-5f6b-451a-81a4-d78e56e04c43-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "29cd1f95-5f6b-451a-81a4-d78e56e04c43" (UID: "29cd1f95-5f6b-451a-81a4-d78e56e04c43"). InnerVolumeSpecName "ssh-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:30:23 crc kubenswrapper[4783]: I1002 11:30:23.016446 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ch65w\" (UniqueName: \"kubernetes.io/projected/29cd1f95-5f6b-451a-81a4-d78e56e04c43-kube-api-access-ch65w\") on node \"crc\" DevicePath \"\"" Oct 02 11:30:23 crc kubenswrapper[4783]: I1002 11:30:23.016642 4783 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/29cd1f95-5f6b-451a-81a4-d78e56e04c43-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 02 11:30:23 crc kubenswrapper[4783]: I1002 11:30:23.016755 4783 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/29cd1f95-5f6b-451a-81a4-d78e56e04c43-inventory\") on node \"crc\" DevicePath \"\"" Oct 02 11:30:23 crc kubenswrapper[4783]: I1002 11:30:23.247263 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-rwwwd" event={"ID":"29cd1f95-5f6b-451a-81a4-d78e56e04c43","Type":"ContainerDied","Data":"ed97454cfa896a22fc66a518724327382030ce50f9d259a68a6b3ee18aad9363"} Oct 02 11:30:23 crc kubenswrapper[4783]: I1002 11:30:23.247318 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ed97454cfa896a22fc66a518724327382030ce50f9d259a68a6b3ee18aad9363" Oct 02 11:30:23 crc kubenswrapper[4783]: I1002 11:30:23.247326 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-rwwwd" Oct 02 11:30:31 crc kubenswrapper[4783]: I1002 11:30:31.033918 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-hk5mj"] Oct 02 11:30:31 crc kubenswrapper[4783]: E1002 11:30:31.036016 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a537075-67c8-4620-a04b-ce3bff07d4c9" containerName="extract-content" Oct 02 11:30:31 crc kubenswrapper[4783]: I1002 11:30:31.036127 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a537075-67c8-4620-a04b-ce3bff07d4c9" containerName="extract-content" Oct 02 11:30:31 crc kubenswrapper[4783]: E1002 11:30:31.036224 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29cd1f95-5f6b-451a-81a4-d78e56e04c43" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Oct 02 11:30:31 crc kubenswrapper[4783]: I1002 11:30:31.036344 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="29cd1f95-5f6b-451a-81a4-d78e56e04c43" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Oct 02 11:30:31 crc kubenswrapper[4783]: E1002 11:30:31.036599 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a537075-67c8-4620-a04b-ce3bff07d4c9" containerName="extract-utilities" Oct 02 11:30:31 crc kubenswrapper[4783]: I1002 11:30:31.036700 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a537075-67c8-4620-a04b-ce3bff07d4c9" containerName="extract-utilities" Oct 02 11:30:31 crc kubenswrapper[4783]: E1002 11:30:31.036795 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4a537075-67c8-4620-a04b-ce3bff07d4c9" containerName="registry-server" Oct 02 11:30:31 crc kubenswrapper[4783]: I1002 11:30:31.036875 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="4a537075-67c8-4620-a04b-ce3bff07d4c9" containerName="registry-server" Oct 02 11:30:31 crc kubenswrapper[4783]: E1002 11:30:31.036977 4783 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="00976817-6948-47a6-9a09-1aa2c5c96052" containerName="collect-profiles" Oct 02 11:30:31 crc kubenswrapper[4783]: I1002 11:30:31.037059 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="00976817-6948-47a6-9a09-1aa2c5c96052" containerName="collect-profiles" Oct 02 11:30:31 crc kubenswrapper[4783]: I1002 11:30:31.037435 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="00976817-6948-47a6-9a09-1aa2c5c96052" containerName="collect-profiles" Oct 02 11:30:31 crc kubenswrapper[4783]: I1002 11:30:31.037560 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="29cd1f95-5f6b-451a-81a4-d78e56e04c43" containerName="configure-os-edpm-deployment-openstack-edpm-ipam" Oct 02 11:30:31 crc kubenswrapper[4783]: I1002 11:30:31.037667 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="4a537075-67c8-4620-a04b-ce3bff07d4c9" containerName="registry-server" Oct 02 11:30:31 crc kubenswrapper[4783]: I1002 11:30:31.038545 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-hk5mj" Oct 02 11:30:31 crc kubenswrapper[4783]: I1002 11:30:31.049290 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-n5lmz" Oct 02 11:30:31 crc kubenswrapper[4783]: I1002 11:30:31.049492 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 02 11:30:31 crc kubenswrapper[4783]: I1002 11:30:31.050359 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-hk5mj"] Oct 02 11:30:31 crc kubenswrapper[4783]: I1002 11:30:31.050982 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 02 11:30:31 crc kubenswrapper[4783]: I1002 11:30:31.051399 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 02 11:30:31 crc kubenswrapper[4783]: I1002 11:30:31.177366 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/654acc46-b6ca-40e2-a3be-1a6fb20ecd90-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-hk5mj\" (UID: \"654acc46-b6ca-40e2-a3be-1a6fb20ecd90\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-hk5mj" Oct 02 11:30:31 crc kubenswrapper[4783]: I1002 11:30:31.177448 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-znl9b\" (UniqueName: \"kubernetes.io/projected/654acc46-b6ca-40e2-a3be-1a6fb20ecd90-kube-api-access-znl9b\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-hk5mj\" (UID: \"654acc46-b6ca-40e2-a3be-1a6fb20ecd90\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-hk5mj" Oct 02 11:30:31 crc kubenswrapper[4783]: I1002 11:30:31.177474 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/654acc46-b6ca-40e2-a3be-1a6fb20ecd90-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-hk5mj\" (UID: \"654acc46-b6ca-40e2-a3be-1a6fb20ecd90\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-hk5mj" Oct 02 11:30:31 crc kubenswrapper[4783]: I1002 11:30:31.279345 4783 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/654acc46-b6ca-40e2-a3be-1a6fb20ecd90-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-hk5mj\" (UID: \"654acc46-b6ca-40e2-a3be-1a6fb20ecd90\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-hk5mj" Oct 02 11:30:31 crc kubenswrapper[4783]: I1002 11:30:31.279436 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-znl9b\" (UniqueName: \"kubernetes.io/projected/654acc46-b6ca-40e2-a3be-1a6fb20ecd90-kube-api-access-znl9b\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-hk5mj\" (UID: \"654acc46-b6ca-40e2-a3be-1a6fb20ecd90\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-hk5mj" Oct 02 11:30:31 crc kubenswrapper[4783]: I1002 11:30:31.279467 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/654acc46-b6ca-40e2-a3be-1a6fb20ecd90-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-hk5mj\" (UID: \"654acc46-b6ca-40e2-a3be-1a6fb20ecd90\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-hk5mj" Oct 02 11:30:31 crc kubenswrapper[4783]: I1002 11:30:31.288064 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/654acc46-b6ca-40e2-a3be-1a6fb20ecd90-inventory\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-hk5mj\" (UID: \"654acc46-b6ca-40e2-a3be-1a6fb20ecd90\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-hk5mj" Oct 02 11:30:31 crc kubenswrapper[4783]: I1002 11:30:31.289192 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/654acc46-b6ca-40e2-a3be-1a6fb20ecd90-ssh-key\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-hk5mj\" (UID: \"654acc46-b6ca-40e2-a3be-1a6fb20ecd90\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-hk5mj" Oct 02 11:30:31 crc kubenswrapper[4783]: I1002 11:30:31.297206 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-znl9b\" (UniqueName: \"kubernetes.io/projected/654acc46-b6ca-40e2-a3be-1a6fb20ecd90-kube-api-access-znl9b\") pod \"configure-os-edpm-deployment-openstack-edpm-ipam-hk5mj\" (UID: \"654acc46-b6ca-40e2-a3be-1a6fb20ecd90\") " pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-hk5mj" Oct 02 11:30:31 crc kubenswrapper[4783]: I1002 11:30:31.367921 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-hk5mj"
Oct 02 11:30:31 crc kubenswrapper[4783]: I1002 11:30:31.920123 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/configure-os-edpm-deployment-openstack-edpm-ipam-hk5mj"]
Oct 02 11:30:32 crc kubenswrapper[4783]: I1002 11:30:32.336202 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-hk5mj" event={"ID":"654acc46-b6ca-40e2-a3be-1a6fb20ecd90","Type":"ContainerStarted","Data":"b7d26f07136571f0d76b26a25506a7c2f7e73d461096e9fd5197840430d49eae"}
Oct 02 11:30:32 crc kubenswrapper[4783]: I1002 11:30:32.336519 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-hk5mj" event={"ID":"654acc46-b6ca-40e2-a3be-1a6fb20ecd90","Type":"ContainerStarted","Data":"86329f0daf7c5c209a0980c71afb4f3ece12ed591546b5d8210a548fd856cf21"}
Oct 02 11:30:32 crc kubenswrapper[4783]: I1002 11:30:32.354841 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-hk5mj" podStartSLOduration=1.138077042 podStartE2EDuration="1.354825301s" podCreationTimestamp="2025-10-02 11:30:31 +0000 UTC" firstStartedPulling="2025-10-02 11:30:31.922187086 +0000 UTC m=+2265.238381347" lastFinishedPulling="2025-10-02 11:30:32.138935345 +0000 UTC m=+2265.455129606" observedRunningTime="2025-10-02 11:30:32.351046618 +0000 UTC m=+2265.667240879" watchObservedRunningTime="2025-10-02 11:30:32.354825301 +0000 UTC m=+2265.671019562"
Oct 02 11:30:33 crc kubenswrapper[4783]: I1002 11:30:33.549940 4783 scope.go:117] "RemoveContainer" containerID="5f41be414b60ced507542f945d0dbbae4870dae2d8ee2c71db9491f9f0b17cfa"
Oct 02 11:30:33 crc kubenswrapper[4783]: E1002 11:30:33.550555 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 11:30:46 crc kubenswrapper[4783]: I1002 11:30:46.545927 4783 scope.go:117] "RemoveContainer" containerID="5f41be414b60ced507542f945d0dbbae4870dae2d8ee2c71db9491f9f0b17cfa"
Oct 02 11:30:46 crc kubenswrapper[4783]: E1002 11:30:46.547883 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 11:30:56 crc kubenswrapper[4783]: I1002 11:30:56.092336 4783 scope.go:117] "RemoveContainer" containerID="98458d31b71ea30aaf4b018b587b8b2dcd58993f2eff3826b846d75a89ca6613"
Oct 02 11:30:59 crc kubenswrapper[4783]: I1002 11:30:59.544927 4783 scope.go:117] "RemoveContainer" containerID="5f41be414b60ced507542f945d0dbbae4870dae2d8ee2c71db9491f9f0b17cfa"
Oct 02 11:30:59 crc kubenswrapper[4783]: E1002 11:30:59.545896 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 11:31:12 crc kubenswrapper[4783]: I1002 11:31:12.545487 4783 scope.go:117] "RemoveContainer" containerID="5f41be414b60ced507542f945d0dbbae4870dae2d8ee2c71db9491f9f0b17cfa"
Oct 02 11:31:12 crc kubenswrapper[4783]: E1002 11:31:12.546270 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 11:31:21 crc kubenswrapper[4783]: I1002 11:31:21.759295 4783 generic.go:334] "Generic (PLEG): container finished" podID="654acc46-b6ca-40e2-a3be-1a6fb20ecd90" containerID="b7d26f07136571f0d76b26a25506a7c2f7e73d461096e9fd5197840430d49eae" exitCode=0
Oct 02 11:31:21 crc kubenswrapper[4783]: I1002 11:31:21.759364 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-hk5mj" event={"ID":"654acc46-b6ca-40e2-a3be-1a6fb20ecd90","Type":"ContainerDied","Data":"b7d26f07136571f0d76b26a25506a7c2f7e73d461096e9fd5197840430d49eae"}
Oct 02 11:31:23 crc kubenswrapper[4783]: I1002 11:31:23.164970 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-hk5mj"
Oct 02 11:31:23 crc kubenswrapper[4783]: I1002 11:31:23.286926 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/654acc46-b6ca-40e2-a3be-1a6fb20ecd90-inventory\") pod \"654acc46-b6ca-40e2-a3be-1a6fb20ecd90\" (UID: \"654acc46-b6ca-40e2-a3be-1a6fb20ecd90\") "
Oct 02 11:31:23 crc kubenswrapper[4783]: I1002 11:31:23.286987 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-znl9b\" (UniqueName: \"kubernetes.io/projected/654acc46-b6ca-40e2-a3be-1a6fb20ecd90-kube-api-access-znl9b\") pod \"654acc46-b6ca-40e2-a3be-1a6fb20ecd90\" (UID: \"654acc46-b6ca-40e2-a3be-1a6fb20ecd90\") "
Oct 02 11:31:23 crc kubenswrapper[4783]: I1002 11:31:23.287199 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/654acc46-b6ca-40e2-a3be-1a6fb20ecd90-ssh-key\") pod \"654acc46-b6ca-40e2-a3be-1a6fb20ecd90\" (UID: \"654acc46-b6ca-40e2-a3be-1a6fb20ecd90\") "
Oct 02 11:31:23 crc kubenswrapper[4783]: I1002 11:31:23.293321 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/654acc46-b6ca-40e2-a3be-1a6fb20ecd90-kube-api-access-znl9b" (OuterVolumeSpecName: "kube-api-access-znl9b") pod "654acc46-b6ca-40e2-a3be-1a6fb20ecd90" (UID: "654acc46-b6ca-40e2-a3be-1a6fb20ecd90"). InnerVolumeSpecName "kube-api-access-znl9b". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 02 11:31:23 crc kubenswrapper[4783]: I1002 11:31:23.314789 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/654acc46-b6ca-40e2-a3be-1a6fb20ecd90-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "654acc46-b6ca-40e2-a3be-1a6fb20ecd90" (UID: "654acc46-b6ca-40e2-a3be-1a6fb20ecd90"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:31:23 crc kubenswrapper[4783]: I1002 11:31:23.315643 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/654acc46-b6ca-40e2-a3be-1a6fb20ecd90-inventory" (OuterVolumeSpecName: "inventory") pod "654acc46-b6ca-40e2-a3be-1a6fb20ecd90" (UID: "654acc46-b6ca-40e2-a3be-1a6fb20ecd90"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:31:23 crc kubenswrapper[4783]: I1002 11:31:23.390426 4783 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/654acc46-b6ca-40e2-a3be-1a6fb20ecd90-inventory\") on node \"crc\" DevicePath \"\""
Oct 02 11:31:23 crc kubenswrapper[4783]: I1002 11:31:23.390461 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-znl9b\" (UniqueName: \"kubernetes.io/projected/654acc46-b6ca-40e2-a3be-1a6fb20ecd90-kube-api-access-znl9b\") on node \"crc\" DevicePath \"\""
Oct 02 11:31:23 crc kubenswrapper[4783]: I1002 11:31:23.390473 4783 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/654acc46-b6ca-40e2-a3be-1a6fb20ecd90-ssh-key\") on node \"crc\" DevicePath \"\""
Oct 02 11:31:23 crc kubenswrapper[4783]: I1002 11:31:23.779090 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-hk5mj" event={"ID":"654acc46-b6ca-40e2-a3be-1a6fb20ecd90","Type":"ContainerDied","Data":"86329f0daf7c5c209a0980c71afb4f3ece12ed591546b5d8210a548fd856cf21"}
Oct 02 11:31:23 crc kubenswrapper[4783]: I1002 11:31:23.779134 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="86329f0daf7c5c209a0980c71afb4f3ece12ed591546b5d8210a548fd856cf21"
Oct 02 11:31:23 crc kubenswrapper[4783]: I1002 11:31:23.779151 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/configure-os-edpm-deployment-openstack-edpm-ipam-hk5mj"
Oct 02 11:31:23 crc kubenswrapper[4783]: I1002 11:31:23.855727 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-bpw9h"]
Oct 02 11:31:23 crc kubenswrapper[4783]: E1002 11:31:23.856361 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="654acc46-b6ca-40e2-a3be-1a6fb20ecd90" containerName="configure-os-edpm-deployment-openstack-edpm-ipam"
Oct 02 11:31:23 crc kubenswrapper[4783]: I1002 11:31:23.856642 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="654acc46-b6ca-40e2-a3be-1a6fb20ecd90" containerName="configure-os-edpm-deployment-openstack-edpm-ipam"
Oct 02 11:31:23 crc kubenswrapper[4783]: I1002 11:31:23.856915 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="654acc46-b6ca-40e2-a3be-1a6fb20ecd90" containerName="configure-os-edpm-deployment-openstack-edpm-ipam"
Oct 02 11:31:23 crc kubenswrapper[4783]: I1002 11:31:23.857633 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-bpw9h"
Oct 02 11:31:23 crc kubenswrapper[4783]: I1002 11:31:23.861207 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Oct 02 11:31:23 crc kubenswrapper[4783]: I1002 11:31:23.861840 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-n5lmz"
Oct 02 11:31:23 crc kubenswrapper[4783]: I1002 11:31:23.862443 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Oct 02 11:31:23 crc kubenswrapper[4783]: I1002 11:31:23.862840 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Oct 02 11:31:23 crc kubenswrapper[4783]: I1002 11:31:23.871386 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-bpw9h"]
Oct 02 11:31:24 crc kubenswrapper[4783]: I1002 11:31:24.009647 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vgkgq\" (UniqueName: \"kubernetes.io/projected/1a6d6ab9-5cf9-42c3-bc7c-c3afd16f7ac8-kube-api-access-vgkgq\") pod \"ssh-known-hosts-edpm-deployment-bpw9h\" (UID: \"1a6d6ab9-5cf9-42c3-bc7c-c3afd16f7ac8\") " pod="openstack/ssh-known-hosts-edpm-deployment-bpw9h"
Oct 02 11:31:24 crc kubenswrapper[4783]: I1002 11:31:24.010132 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/1a6d6ab9-5cf9-42c3-bc7c-c3afd16f7ac8-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-bpw9h\" (UID: \"1a6d6ab9-5cf9-42c3-bc7c-c3afd16f7ac8\") " pod="openstack/ssh-known-hosts-edpm-deployment-bpw9h"
Oct 02 11:31:24 crc kubenswrapper[4783]: I1002 11:31:24.010226 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1a6d6ab9-5cf9-42c3-bc7c-c3afd16f7ac8-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-bpw9h\" (UID: \"1a6d6ab9-5cf9-42c3-bc7c-c3afd16f7ac8\") " pod="openstack/ssh-known-hosts-edpm-deployment-bpw9h"
Oct 02 11:31:24 crc kubenswrapper[4783]: I1002 11:31:24.111994 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/1a6d6ab9-5cf9-42c3-bc7c-c3afd16f7ac8-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-bpw9h\" (UID: \"1a6d6ab9-5cf9-42c3-bc7c-c3afd16f7ac8\") " pod="openstack/ssh-known-hosts-edpm-deployment-bpw9h"
Oct 02 11:31:24 crc kubenswrapper[4783]: I1002 11:31:24.112076 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1a6d6ab9-5cf9-42c3-bc7c-c3afd16f7ac8-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-bpw9h\" (UID: \"1a6d6ab9-5cf9-42c3-bc7c-c3afd16f7ac8\") " pod="openstack/ssh-known-hosts-edpm-deployment-bpw9h"
Oct 02 11:31:24 crc kubenswrapper[4783]: I1002 11:31:24.112125 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vgkgq\" (UniqueName: \"kubernetes.io/projected/1a6d6ab9-5cf9-42c3-bc7c-c3afd16f7ac8-kube-api-access-vgkgq\") pod \"ssh-known-hosts-edpm-deployment-bpw9h\" (UID: \"1a6d6ab9-5cf9-42c3-bc7c-c3afd16f7ac8\") " pod="openstack/ssh-known-hosts-edpm-deployment-bpw9h"
Oct 02 11:31:24 crc kubenswrapper[4783]: I1002 11:31:24.118940 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/1a6d6ab9-5cf9-42c3-bc7c-c3afd16f7ac8-inventory-0\") pod \"ssh-known-hosts-edpm-deployment-bpw9h\" (UID: \"1a6d6ab9-5cf9-42c3-bc7c-c3afd16f7ac8\") " pod="openstack/ssh-known-hosts-edpm-deployment-bpw9h"
Oct 02 11:31:24 crc kubenswrapper[4783]: I1002 11:31:24.120510 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1a6d6ab9-5cf9-42c3-bc7c-c3afd16f7ac8-ssh-key-openstack-edpm-ipam\") pod \"ssh-known-hosts-edpm-deployment-bpw9h\" (UID: \"1a6d6ab9-5cf9-42c3-bc7c-c3afd16f7ac8\") " pod="openstack/ssh-known-hosts-edpm-deployment-bpw9h"
Oct 02 11:31:24 crc kubenswrapper[4783]: I1002 11:31:24.128863 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vgkgq\" (UniqueName: \"kubernetes.io/projected/1a6d6ab9-5cf9-42c3-bc7c-c3afd16f7ac8-kube-api-access-vgkgq\") pod \"ssh-known-hosts-edpm-deployment-bpw9h\" (UID: \"1a6d6ab9-5cf9-42c3-bc7c-c3afd16f7ac8\") " pod="openstack/ssh-known-hosts-edpm-deployment-bpw9h"
Oct 02 11:31:24 crc kubenswrapper[4783]: I1002 11:31:24.196171 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-bpw9h"
Oct 02 11:31:24 crc kubenswrapper[4783]: I1002 11:31:24.729380 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ssh-known-hosts-edpm-deployment-bpw9h"]
Oct 02 11:31:24 crc kubenswrapper[4783]: I1002 11:31:24.788156 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-bpw9h" event={"ID":"1a6d6ab9-5cf9-42c3-bc7c-c3afd16f7ac8","Type":"ContainerStarted","Data":"f7a56d35d10756451e600ae32c6d3d199c7559580730d0335c6474d48c35bd0e"}
Oct 02 11:31:25 crc kubenswrapper[4783]: I1002 11:31:25.545318 4783 scope.go:117] "RemoveContainer" containerID="5f41be414b60ced507542f945d0dbbae4870dae2d8ee2c71db9491f9f0b17cfa"
Oct 02 11:31:25 crc kubenswrapper[4783]: E1002 11:31:25.545907 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 11:31:25 crc kubenswrapper[4783]: I1002 11:31:25.797466 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-bpw9h" event={"ID":"1a6d6ab9-5cf9-42c3-bc7c-c3afd16f7ac8","Type":"ContainerStarted","Data":"8b7fc32e00d5a64bf2186d22d7f865530ab9c43580c07293fa64116908bcf7cd"}
Oct 02 11:31:25 crc kubenswrapper[4783]: I1002 11:31:25.820591 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ssh-known-hosts-edpm-deployment-bpw9h" podStartSLOduration=2.61094693 podStartE2EDuration="2.820567746s" podCreationTimestamp="2025-10-02 11:31:23 +0000 UTC" firstStartedPulling="2025-10-02 11:31:24.740613275 +0000 UTC m=+2318.056807536" lastFinishedPulling="2025-10-02 11:31:24.950234091 +0000 UTC m=+2318.266428352" observedRunningTime="2025-10-02 11:31:25.816906667 +0000 UTC m=+2319.133100938" watchObservedRunningTime="2025-10-02 11:31:25.820567746 +0000 UTC m=+2319.136762007"
Oct 02 11:31:32 crc kubenswrapper[4783]: I1002 11:31:32.860826 4783 generic.go:334] "Generic (PLEG): container finished" podID="1a6d6ab9-5cf9-42c3-bc7c-c3afd16f7ac8" containerID="8b7fc32e00d5a64bf2186d22d7f865530ab9c43580c07293fa64116908bcf7cd" exitCode=0
Oct 02 11:31:32 crc kubenswrapper[4783]: I1002 11:31:32.861036 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-bpw9h" event={"ID":"1a6d6ab9-5cf9-42c3-bc7c-c3afd16f7ac8","Type":"ContainerDied","Data":"8b7fc32e00d5a64bf2186d22d7f865530ab9c43580c07293fa64116908bcf7cd"}
Oct 02 11:31:34 crc kubenswrapper[4783]: I1002 11:31:34.274246 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-bpw9h"
Oct 02 11:31:34 crc kubenswrapper[4783]: I1002 11:31:34.417541 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vgkgq\" (UniqueName: \"kubernetes.io/projected/1a6d6ab9-5cf9-42c3-bc7c-c3afd16f7ac8-kube-api-access-vgkgq\") pod \"1a6d6ab9-5cf9-42c3-bc7c-c3afd16f7ac8\" (UID: \"1a6d6ab9-5cf9-42c3-bc7c-c3afd16f7ac8\") "
Oct 02 11:31:34 crc kubenswrapper[4783]: I1002 11:31:34.417647 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/1a6d6ab9-5cf9-42c3-bc7c-c3afd16f7ac8-inventory-0\") pod \"1a6d6ab9-5cf9-42c3-bc7c-c3afd16f7ac8\" (UID: \"1a6d6ab9-5cf9-42c3-bc7c-c3afd16f7ac8\") "
Oct 02 11:31:34 crc kubenswrapper[4783]: I1002 11:31:34.417749 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1a6d6ab9-5cf9-42c3-bc7c-c3afd16f7ac8-ssh-key-openstack-edpm-ipam\") pod \"1a6d6ab9-5cf9-42c3-bc7c-c3afd16f7ac8\" (UID: \"1a6d6ab9-5cf9-42c3-bc7c-c3afd16f7ac8\") "
Oct 02 11:31:34 crc kubenswrapper[4783]: I1002 11:31:34.423603 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1a6d6ab9-5cf9-42c3-bc7c-c3afd16f7ac8-kube-api-access-vgkgq" (OuterVolumeSpecName: "kube-api-access-vgkgq") pod "1a6d6ab9-5cf9-42c3-bc7c-c3afd16f7ac8" (UID: "1a6d6ab9-5cf9-42c3-bc7c-c3afd16f7ac8"). InnerVolumeSpecName "kube-api-access-vgkgq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 02 11:31:34 crc kubenswrapper[4783]: I1002 11:31:34.463159 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a6d6ab9-5cf9-42c3-bc7c-c3afd16f7ac8-inventory-0" (OuterVolumeSpecName: "inventory-0") pod "1a6d6ab9-5cf9-42c3-bc7c-c3afd16f7ac8" (UID: "1a6d6ab9-5cf9-42c3-bc7c-c3afd16f7ac8"). InnerVolumeSpecName "inventory-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:31:34 crc kubenswrapper[4783]: I1002 11:31:34.466131 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a6d6ab9-5cf9-42c3-bc7c-c3afd16f7ac8-ssh-key-openstack-edpm-ipam" (OuterVolumeSpecName: "ssh-key-openstack-edpm-ipam") pod "1a6d6ab9-5cf9-42c3-bc7c-c3afd16f7ac8" (UID: "1a6d6ab9-5cf9-42c3-bc7c-c3afd16f7ac8"). InnerVolumeSpecName "ssh-key-openstack-edpm-ipam". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:31:34 crc kubenswrapper[4783]: I1002 11:31:34.519986 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vgkgq\" (UniqueName: \"kubernetes.io/projected/1a6d6ab9-5cf9-42c3-bc7c-c3afd16f7ac8-kube-api-access-vgkgq\") on node \"crc\" DevicePath \"\""
Oct 02 11:31:34 crc kubenswrapper[4783]: I1002 11:31:34.520023 4783 reconciler_common.go:293] "Volume detached for volume \"inventory-0\" (UniqueName: \"kubernetes.io/secret/1a6d6ab9-5cf9-42c3-bc7c-c3afd16f7ac8-inventory-0\") on node \"crc\" DevicePath \"\""
Oct 02 11:31:34 crc kubenswrapper[4783]: I1002 11:31:34.520038 4783 reconciler_common.go:293] "Volume detached for volume \"ssh-key-openstack-edpm-ipam\" (UniqueName: \"kubernetes.io/secret/1a6d6ab9-5cf9-42c3-bc7c-c3afd16f7ac8-ssh-key-openstack-edpm-ipam\") on node \"crc\" DevicePath \"\""
Oct 02 11:31:34 crc kubenswrapper[4783]: I1002 11:31:34.880044 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ssh-known-hosts-edpm-deployment-bpw9h" event={"ID":"1a6d6ab9-5cf9-42c3-bc7c-c3afd16f7ac8","Type":"ContainerDied","Data":"f7a56d35d10756451e600ae32c6d3d199c7559580730d0335c6474d48c35bd0e"}
Oct 02 11:31:34 crc kubenswrapper[4783]: I1002 11:31:34.880459 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f7a56d35d10756451e600ae32c6d3d199c7559580730d0335c6474d48c35bd0e"
Oct 02 11:31:34 crc kubenswrapper[4783]: I1002 11:31:34.880379 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ssh-known-hosts-edpm-deployment-bpw9h"
Oct 02 11:31:34 crc kubenswrapper[4783]: I1002 11:31:34.974630 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-vqjwf"]
Oct 02 11:31:34 crc kubenswrapper[4783]: E1002 11:31:34.975053 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a6d6ab9-5cf9-42c3-bc7c-c3afd16f7ac8" containerName="ssh-known-hosts-edpm-deployment"
Oct 02 11:31:34 crc kubenswrapper[4783]: I1002 11:31:34.975070 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a6d6ab9-5cf9-42c3-bc7c-c3afd16f7ac8" containerName="ssh-known-hosts-edpm-deployment"
Oct 02 11:31:34 crc kubenswrapper[4783]: I1002 11:31:34.975242 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a6d6ab9-5cf9-42c3-bc7c-c3afd16f7ac8" containerName="ssh-known-hosts-edpm-deployment"
Oct 02 11:31:34 crc kubenswrapper[4783]: I1002 11:31:34.980184 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-vqjwf"
Oct 02 11:31:34 crc kubenswrapper[4783]: I1002 11:31:34.985721 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Oct 02 11:31:34 crc kubenswrapper[4783]: I1002 11:31:34.985951 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Oct 02 11:31:34 crc kubenswrapper[4783]: I1002 11:31:34.986228 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-n5lmz"
Oct 02 11:31:34 crc kubenswrapper[4783]: I1002 11:31:34.986513 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Oct 02 11:31:34 crc kubenswrapper[4783]: I1002 11:31:34.994754 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-vqjwf"]
Oct 02 11:31:35 crc kubenswrapper[4783]: I1002 11:31:35.130627 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a82c04b0-2ff6-4514-b241-3c068b2a577d-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-vqjwf\" (UID: \"a82c04b0-2ff6-4514-b241-3c068b2a577d\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-vqjwf"
Oct 02 11:31:35 crc kubenswrapper[4783]: I1002 11:31:35.131004 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6jknp\" (UniqueName: \"kubernetes.io/projected/a82c04b0-2ff6-4514-b241-3c068b2a577d-kube-api-access-6jknp\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-vqjwf\" (UID: \"a82c04b0-2ff6-4514-b241-3c068b2a577d\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-vqjwf"
Oct 02 11:31:35 crc kubenswrapper[4783]: I1002 11:31:35.131132 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a82c04b0-2ff6-4514-b241-3c068b2a577d-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-vqjwf\" (UID: \"a82c04b0-2ff6-4514-b241-3c068b2a577d\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-vqjwf"
Oct 02 11:31:35 crc kubenswrapper[4783]: I1002 11:31:35.232870 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a82c04b0-2ff6-4514-b241-3c068b2a577d-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-vqjwf\" (UID: \"a82c04b0-2ff6-4514-b241-3c068b2a577d\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-vqjwf"
Oct 02 11:31:35 crc kubenswrapper[4783]: I1002 11:31:35.232955 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6jknp\" (UniqueName: \"kubernetes.io/projected/a82c04b0-2ff6-4514-b241-3c068b2a577d-kube-api-access-6jknp\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-vqjwf\" (UID: \"a82c04b0-2ff6-4514-b241-3c068b2a577d\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-vqjwf"
Oct 02 11:31:35 crc kubenswrapper[4783]: I1002 11:31:35.233036 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a82c04b0-2ff6-4514-b241-3c068b2a577d-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-vqjwf\" (UID: \"a82c04b0-2ff6-4514-b241-3c068b2a577d\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-vqjwf"
Oct 02 11:31:35 crc kubenswrapper[4783]: I1002 11:31:35.237543 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a82c04b0-2ff6-4514-b241-3c068b2a577d-ssh-key\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-vqjwf\" (UID: \"a82c04b0-2ff6-4514-b241-3c068b2a577d\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-vqjwf"
Oct 02 11:31:35 crc kubenswrapper[4783]: I1002 11:31:35.237884 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a82c04b0-2ff6-4514-b241-3c068b2a577d-inventory\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-vqjwf\" (UID: \"a82c04b0-2ff6-4514-b241-3c068b2a577d\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-vqjwf"
Oct 02 11:31:35 crc kubenswrapper[4783]: I1002 11:31:35.256205 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6jknp\" (UniqueName: \"kubernetes.io/projected/a82c04b0-2ff6-4514-b241-3c068b2a577d-kube-api-access-6jknp\") pod \"run-os-edpm-deployment-openstack-edpm-ipam-vqjwf\" (UID: \"a82c04b0-2ff6-4514-b241-3c068b2a577d\") " pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-vqjwf"
Oct 02 11:31:35 crc kubenswrapper[4783]: I1002 11:31:35.309634 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-vqjwf"
Oct 02 11:31:35 crc kubenswrapper[4783]: I1002 11:31:35.835188 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/run-os-edpm-deployment-openstack-edpm-ipam-vqjwf"]
Oct 02 11:31:35 crc kubenswrapper[4783]: I1002 11:31:35.888649 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-vqjwf" event={"ID":"a82c04b0-2ff6-4514-b241-3c068b2a577d","Type":"ContainerStarted","Data":"861d4536302f60091bcfdd6091c517ba9a2c129f5ca17f0d14510d20aa605698"}
Oct 02 11:31:36 crc kubenswrapper[4783]: I1002 11:31:36.899531 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-vqjwf" event={"ID":"a82c04b0-2ff6-4514-b241-3c068b2a577d","Type":"ContainerStarted","Data":"cc36653afabcb1ee0b23563fbd9eac997e0ba2d2cd05d00f8c59b4efa93a36b1"}
Oct 02 11:31:36 crc kubenswrapper[4783]: I1002 11:31:36.920248 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-vqjwf" podStartSLOduration=2.766073594 podStartE2EDuration="2.920225271s" podCreationTimestamp="2025-10-02 11:31:34 +0000 UTC" firstStartedPulling="2025-10-02 11:31:35.845767569 +0000 UTC m=+2329.161961830" lastFinishedPulling="2025-10-02 11:31:35.999919246 +0000 UTC m=+2329.316113507" observedRunningTime="2025-10-02 11:31:36.919265585 +0000 UTC m=+2330.235459886" watchObservedRunningTime="2025-10-02 11:31:36.920225271 +0000 UTC m=+2330.236419552"
Oct 02 11:31:38 crc kubenswrapper[4783]: I1002 11:31:38.545444 4783 scope.go:117] "RemoveContainer" containerID="5f41be414b60ced507542f945d0dbbae4870dae2d8ee2c71db9491f9f0b17cfa"
Oct 02 11:31:38 crc kubenswrapper[4783]: E1002 11:31:38.545992 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 11:31:44 crc kubenswrapper[4783]: I1002 11:31:44.969519 4783 generic.go:334] "Generic (PLEG): container finished" podID="a82c04b0-2ff6-4514-b241-3c068b2a577d" containerID="cc36653afabcb1ee0b23563fbd9eac997e0ba2d2cd05d00f8c59b4efa93a36b1" exitCode=0
Oct 02 11:31:44 crc kubenswrapper[4783]: I1002 11:31:44.969604 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-vqjwf" event={"ID":"a82c04b0-2ff6-4514-b241-3c068b2a577d","Type":"ContainerDied","Data":"cc36653afabcb1ee0b23563fbd9eac997e0ba2d2cd05d00f8c59b4efa93a36b1"}
Oct 02 11:31:46 crc kubenswrapper[4783]: I1002 11:31:46.373477 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-vqjwf"
Oct 02 11:31:46 crc kubenswrapper[4783]: I1002 11:31:46.551341 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a82c04b0-2ff6-4514-b241-3c068b2a577d-inventory\") pod \"a82c04b0-2ff6-4514-b241-3c068b2a577d\" (UID: \"a82c04b0-2ff6-4514-b241-3c068b2a577d\") "
Oct 02 11:31:46 crc kubenswrapper[4783]: I1002 11:31:46.551768 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6jknp\" (UniqueName: \"kubernetes.io/projected/a82c04b0-2ff6-4514-b241-3c068b2a577d-kube-api-access-6jknp\") pod \"a82c04b0-2ff6-4514-b241-3c068b2a577d\" (UID: \"a82c04b0-2ff6-4514-b241-3c068b2a577d\") "
Oct 02 11:31:46 crc kubenswrapper[4783]: I1002 11:31:46.551807 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a82c04b0-2ff6-4514-b241-3c068b2a577d-ssh-key\") pod \"a82c04b0-2ff6-4514-b241-3c068b2a577d\" (UID: \"a82c04b0-2ff6-4514-b241-3c068b2a577d\") "
Oct 02 11:31:46 crc kubenswrapper[4783]: I1002 11:31:46.557687 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a82c04b0-2ff6-4514-b241-3c068b2a577d-kube-api-access-6jknp" (OuterVolumeSpecName: "kube-api-access-6jknp") pod "a82c04b0-2ff6-4514-b241-3c068b2a577d" (UID: "a82c04b0-2ff6-4514-b241-3c068b2a577d"). InnerVolumeSpecName "kube-api-access-6jknp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 02 11:31:46 crc kubenswrapper[4783]: I1002 11:31:46.579160 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a82c04b0-2ff6-4514-b241-3c068b2a577d-inventory" (OuterVolumeSpecName: "inventory") pod "a82c04b0-2ff6-4514-b241-3c068b2a577d" (UID: "a82c04b0-2ff6-4514-b241-3c068b2a577d"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:31:46 crc kubenswrapper[4783]: I1002 11:31:46.598294 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a82c04b0-2ff6-4514-b241-3c068b2a577d-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "a82c04b0-2ff6-4514-b241-3c068b2a577d" (UID: "a82c04b0-2ff6-4514-b241-3c068b2a577d"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:31:46 crc kubenswrapper[4783]: I1002 11:31:46.656101 4783 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/a82c04b0-2ff6-4514-b241-3c068b2a577d-inventory\") on node \"crc\" DevicePath \"\""
Oct 02 11:31:46 crc kubenswrapper[4783]: I1002 11:31:46.656136 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6jknp\" (UniqueName: \"kubernetes.io/projected/a82c04b0-2ff6-4514-b241-3c068b2a577d-kube-api-access-6jknp\") on node \"crc\" DevicePath \"\""
Oct 02 11:31:46 crc kubenswrapper[4783]: I1002 11:31:46.656149 4783 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/a82c04b0-2ff6-4514-b241-3c068b2a577d-ssh-key\") on node \"crc\" DevicePath \"\""
Oct 02 11:31:46 crc kubenswrapper[4783]: I1002 11:31:46.996210 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-vqjwf" event={"ID":"a82c04b0-2ff6-4514-b241-3c068b2a577d","Type":"ContainerDied","Data":"861d4536302f60091bcfdd6091c517ba9a2c129f5ca17f0d14510d20aa605698"}
Oct 02 11:31:46 crc kubenswrapper[4783]: I1002 11:31:46.996584 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="861d4536302f60091bcfdd6091c517ba9a2c129f5ca17f0d14510d20aa605698"
Oct 02 11:31:46 crc kubenswrapper[4783]: I1002 11:31:46.996557 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/run-os-edpm-deployment-openstack-edpm-ipam-vqjwf"
Oct 02 11:31:47 crc kubenswrapper[4783]: I1002 11:31:47.161373 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-frzzd"]
Oct 02 11:31:47 crc kubenswrapper[4783]: E1002 11:31:47.161785 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a82c04b0-2ff6-4514-b241-3c068b2a577d" containerName="run-os-edpm-deployment-openstack-edpm-ipam"
Oct 02 11:31:47 crc kubenswrapper[4783]: I1002 11:31:47.161805 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="a82c04b0-2ff6-4514-b241-3c068b2a577d" containerName="run-os-edpm-deployment-openstack-edpm-ipam"
Oct 02 11:31:47 crc kubenswrapper[4783]: I1002 11:31:47.162025 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="a82c04b0-2ff6-4514-b241-3c068b2a577d" containerName="run-os-edpm-deployment-openstack-edpm-ipam"
Oct 02 11:31:47 crc kubenswrapper[4783]: I1002 11:31:47.162668 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-frzzd"
Oct 02 11:31:47 crc kubenswrapper[4783]: I1002 11:31:47.165395 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-n5lmz"
Oct 02 11:31:47 crc kubenswrapper[4783]: I1002 11:31:47.165637 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Oct 02 11:31:47 crc kubenswrapper[4783]: I1002 11:31:47.165745 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Oct 02 11:31:47 crc kubenswrapper[4783]: I1002 11:31:47.166878 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-frzzd"]
Oct 02 11:31:47 crc kubenswrapper[4783]: I1002 11:31:47.169956 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Oct 02 11:31:47 crc kubenswrapper[4783]: I1002 11:31:47.266945 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b96ddc9e-8288-4843-86c3-85caab8d78af-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-frzzd\" (UID: \"b96ddc9e-8288-4843-86c3-85caab8d78af\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-frzzd"
Oct 02 11:31:47 crc kubenswrapper[4783]: I1002 11:31:47.267077 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b96ddc9e-8288-4843-86c3-85caab8d78af-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-frzzd\" (UID: \"b96ddc9e-8288-4843-86c3-85caab8d78af\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-frzzd"
Oct 02 11:31:47 crc kubenswrapper[4783]: I1002 11:31:47.267098 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bw9wm\" (UniqueName: \"kubernetes.io/projected/b96ddc9e-8288-4843-86c3-85caab8d78af-kube-api-access-bw9wm\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-frzzd\" (UID: \"b96ddc9e-8288-4843-86c3-85caab8d78af\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-frzzd"
Oct 02 11:31:47 crc kubenswrapper[4783]: I1002 11:31:47.368946 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b96ddc9e-8288-4843-86c3-85caab8d78af-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-frzzd\" (UID: \"b96ddc9e-8288-4843-86c3-85caab8d78af\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-frzzd"
Oct 02 11:31:47 crc kubenswrapper[4783]: I1002 11:31:47.369285 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bw9wm\" (UniqueName: \"kubernetes.io/projected/b96ddc9e-8288-4843-86c3-85caab8d78af-kube-api-access-bw9wm\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-frzzd\" (UID: \"b96ddc9e-8288-4843-86c3-85caab8d78af\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-frzzd"
Oct 02 11:31:47 crc kubenswrapper[4783]: I1002 11:31:47.369642 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b96ddc9e-8288-4843-86c3-85caab8d78af-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-frzzd\" (UID: \"b96ddc9e-8288-4843-86c3-85caab8d78af\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-frzzd"
Oct 02 11:31:47 crc kubenswrapper[4783]: I1002 11:31:47.376318 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b96ddc9e-8288-4843-86c3-85caab8d78af-ssh-key\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-frzzd\" (UID: \"b96ddc9e-8288-4843-86c3-85caab8d78af\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-frzzd"
Oct 02 11:31:47 crc kubenswrapper[4783]: I1002 11:31:47.376762 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b96ddc9e-8288-4843-86c3-85caab8d78af-inventory\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-frzzd\" (UID: \"b96ddc9e-8288-4843-86c3-85caab8d78af\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-frzzd"
Oct 02 11:31:47 crc kubenswrapper[4783]: I1002 11:31:47.386735 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bw9wm\" (UniqueName: \"kubernetes.io/projected/b96ddc9e-8288-4843-86c3-85caab8d78af-kube-api-access-bw9wm\") pod \"reboot-os-edpm-deployment-openstack-edpm-ipam-frzzd\" (UID: \"b96ddc9e-8288-4843-86c3-85caab8d78af\") " pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-frzzd"
Oct 02 11:31:47 crc kubenswrapper[4783]: I1002 11:31:47.483004 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-n5lmz"
Oct 02 11:31:47 crc kubenswrapper[4783]: I1002 11:31:47.492086 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-frzzd"
Oct 02 11:31:48 crc kubenswrapper[4783]: I1002 11:31:48.123258 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-frzzd"]
Oct 02 11:31:48 crc kubenswrapper[4783]: I1002 11:31:48.305248 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Oct 02 11:31:49 crc kubenswrapper[4783]: I1002 11:31:49.020902 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-frzzd" event={"ID":"b96ddc9e-8288-4843-86c3-85caab8d78af","Type":"ContainerStarted","Data":"91cf3d5de2ea894e290c84380a8f398864d566de5d6be1c790871ff6bb7576bf"}
Oct 02 11:31:49 crc kubenswrapper[4783]: I1002 11:31:49.021301 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-frzzd" event={"ID":"b96ddc9e-8288-4843-86c3-85caab8d78af","Type":"ContainerStarted","Data":"c873d63b0b75e9d43f5368f72ded34a3023b02dcf8bedd8de6295c4119d6dbf2"}
Oct 02 11:31:49 crc kubenswrapper[4783]: I1002 11:31:49.067251 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-frzzd" podStartSLOduration=1.889110114 podStartE2EDuration="2.067190558s" podCreationTimestamp="2025-10-02 11:31:47 +0000 UTC" firstStartedPulling="2025-10-02 11:31:48.12532443 +0000 UTC m=+2341.441518691" lastFinishedPulling="2025-10-02 11:31:48.303404874 +0000 UTC m=+2341.619599135" observedRunningTime="2025-10-02 11:31:49.044209227 +0000 UTC m=+2342.360403488" watchObservedRunningTime="2025-10-02 11:31:49.067190558 +0000 UTC m=+2342.383384839"
Oct 02 11:31:51 crc kubenswrapper[4783]: I1002 11:31:51.544741 4783 scope.go:117] "RemoveContainer" containerID="5f41be414b60ced507542f945d0dbbae4870dae2d8ee2c71db9491f9f0b17cfa"
Oct 02 11:31:51 crc kubenswrapper[4783]: E1002 11:31:51.545531 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 11:31:59 crc kubenswrapper[4783]: I1002 11:31:59.123323 4783 generic.go:334] "Generic (PLEG): container finished" podID="b96ddc9e-8288-4843-86c3-85caab8d78af" containerID="91cf3d5de2ea894e290c84380a8f398864d566de5d6be1c790871ff6bb7576bf" exitCode=0
Oct 02 11:31:59 crc kubenswrapper[4783]: I1002 11:31:59.123385 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-frzzd" event={"ID":"b96ddc9e-8288-4843-86c3-85caab8d78af","Type":"ContainerDied","Data":"91cf3d5de2ea894e290c84380a8f398864d566de5d6be1c790871ff6bb7576bf"}
Oct 02 11:32:00 crc kubenswrapper[4783]: I1002 11:32:00.508126 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-frzzd"
Oct 02 11:32:00 crc kubenswrapper[4783]: I1002 11:32:00.643591 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bw9wm\" (UniqueName: \"kubernetes.io/projected/b96ddc9e-8288-4843-86c3-85caab8d78af-kube-api-access-bw9wm\") pod \"b96ddc9e-8288-4843-86c3-85caab8d78af\" (UID: \"b96ddc9e-8288-4843-86c3-85caab8d78af\") "
Oct 02 11:32:00 crc kubenswrapper[4783]: I1002 11:32:00.643733 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b96ddc9e-8288-4843-86c3-85caab8d78af-ssh-key\") pod \"b96ddc9e-8288-4843-86c3-85caab8d78af\" (UID: \"b96ddc9e-8288-4843-86c3-85caab8d78af\") "
Oct 02 11:32:00 crc kubenswrapper[4783]: I1002 11:32:00.643896 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b96ddc9e-8288-4843-86c3-85caab8d78af-inventory\") pod \"b96ddc9e-8288-4843-86c3-85caab8d78af\" (UID: \"b96ddc9e-8288-4843-86c3-85caab8d78af\") "
Oct 02 11:32:00 crc kubenswrapper[4783]: I1002 11:32:00.649666 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b96ddc9e-8288-4843-86c3-85caab8d78af-kube-api-access-bw9wm" (OuterVolumeSpecName: "kube-api-access-bw9wm") pod "b96ddc9e-8288-4843-86c3-85caab8d78af" (UID: "b96ddc9e-8288-4843-86c3-85caab8d78af"). InnerVolumeSpecName "kube-api-access-bw9wm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 02 11:32:00 crc kubenswrapper[4783]: I1002 11:32:00.681185 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b96ddc9e-8288-4843-86c3-85caab8d78af-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "b96ddc9e-8288-4843-86c3-85caab8d78af" (UID: "b96ddc9e-8288-4843-86c3-85caab8d78af"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:32:00 crc kubenswrapper[4783]: I1002 11:32:00.696308 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b96ddc9e-8288-4843-86c3-85caab8d78af-inventory" (OuterVolumeSpecName: "inventory") pod "b96ddc9e-8288-4843-86c3-85caab8d78af" (UID: "b96ddc9e-8288-4843-86c3-85caab8d78af"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:32:00 crc kubenswrapper[4783]: I1002 11:32:00.746022 4783 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/b96ddc9e-8288-4843-86c3-85caab8d78af-ssh-key\") on node \"crc\" DevicePath \"\""
Oct 02 11:32:00 crc kubenswrapper[4783]: I1002 11:32:00.746056 4783 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/b96ddc9e-8288-4843-86c3-85caab8d78af-inventory\") on node \"crc\" DevicePath \"\""
Oct 02 11:32:00 crc kubenswrapper[4783]: I1002 11:32:00.746066 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bw9wm\" (UniqueName: \"kubernetes.io/projected/b96ddc9e-8288-4843-86c3-85caab8d78af-kube-api-access-bw9wm\") on node \"crc\" DevicePath \"\""
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.141483 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-frzzd" event={"ID":"b96ddc9e-8288-4843-86c3-85caab8d78af","Type":"ContainerDied","Data":"c873d63b0b75e9d43f5368f72ded34a3023b02dcf8bedd8de6295c4119d6dbf2"}
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.141525 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c873d63b0b75e9d43f5368f72ded34a3023b02dcf8bedd8de6295c4119d6dbf2"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.141536 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/reboot-os-edpm-deployment-openstack-edpm-ipam-frzzd"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.239699 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf"]
Oct 02 11:32:01 crc kubenswrapper[4783]: E1002 11:32:01.240262 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b96ddc9e-8288-4843-86c3-85caab8d78af" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.240285 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="b96ddc9e-8288-4843-86c3-85caab8d78af" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.240541 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="b96ddc9e-8288-4843-86c3-85caab8d78af" containerName="reboot-os-edpm-deployment-openstack-edpm-ipam"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.241432 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.244218 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-libvirt-default-certs-0"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.244555 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-ovn-default-certs-0"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.244879 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-neutron-metadata-default-certs-0"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.245704 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.245873 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.246055 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-n5lmz"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.246501 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-telemetry-default-certs-0"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.248365 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.249942 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf"]
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.359662 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3fa822ea-7474-41b4-8203-6089f1eb37cc-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-f26lf\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.359751 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-f26lf\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.359784 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3fa822ea-7474-41b4-8203-6089f1eb37cc-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-f26lf\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.359980 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3fa822ea-7474-41b4-8203-6089f1eb37cc-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-f26lf\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.360047 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-f26lf\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.360199 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-f26lf\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.360258 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-f26lf\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.360462 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-f26lf\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.360611 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3fa822ea-7474-41b4-8203-6089f1eb37cc-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-f26lf\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.360716 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-f26lf\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.360751 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-f26lf\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.360842 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-f26lf\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.361009 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-f26lf\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.361111 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c574w\" (UniqueName: \"kubernetes.io/projected/3fa822ea-7474-41b4-8203-6089f1eb37cc-kube-api-access-c574w\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-f26lf\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.463982 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3fa822ea-7474-41b4-8203-6089f1eb37cc-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-f26lf\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.464057 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-f26lf\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.464086 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-f26lf\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.464131 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-f26lf\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.464176 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-f26lf\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.464217 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c574w\" (UniqueName: \"kubernetes.io/projected/3fa822ea-7474-41b4-8203-6089f1eb37cc-kube-api-access-c574w\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-f26lf\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.464761 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3fa822ea-7474-41b4-8203-6089f1eb37cc-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-f26lf\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.465495 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-f26lf\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.465528 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3fa822ea-7474-41b4-8203-6089f1eb37cc-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-f26lf\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.465577 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3fa822ea-7474-41b4-8203-6089f1eb37cc-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-f26lf\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.465602 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-f26lf\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.465745 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-f26lf\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.465777 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-f26lf\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.465841 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-f26lf\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.469760 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-libvirt-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-f26lf\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.472257 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-telemetry-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-f26lf\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.470038 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-ssh-key\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-f26lf\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.469933 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-neutron-metadata-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-f26lf\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.473813 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-nova-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-f26lf\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.474692 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3fa822ea-7474-41b4-8203-6089f1eb37cc-openstack-edpm-ipam-ovn-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-f26lf\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.476266 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-ovn-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-f26lf\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.476705 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3fa822ea-7474-41b4-8203-6089f1eb37cc-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-f26lf\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.476851 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3fa822ea-7474-41b4-8203-6089f1eb37cc-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-f26lf\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.478746 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-repo-setup-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-f26lf\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.478778 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-bootstrap-combined-ca-bundle\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-f26lf\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.479367 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3fa822ea-7474-41b4-8203-6089f1eb37cc-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-f26lf\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.479877 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-inventory\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-f26lf\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.483740 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c574w\" (UniqueName: \"kubernetes.io/projected/3fa822ea-7474-41b4-8203-6089f1eb37cc-kube-api-access-c574w\") pod \"install-certs-edpm-deployment-openstack-edpm-ipam-f26lf\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf"
Oct 02 11:32:01 crc kubenswrapper[4783]: I1002 11:32:01.564852 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf"
Oct 02 11:32:02 crc kubenswrapper[4783]: I1002 11:32:02.069611 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf"]
Oct 02 11:32:02 crc kubenswrapper[4783]: I1002 11:32:02.074988 4783 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Oct 02 11:32:02 crc kubenswrapper[4783]: I1002 11:32:02.149438 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf" event={"ID":"3fa822ea-7474-41b4-8203-6089f1eb37cc","Type":"ContainerStarted","Data":"9a0cc9523d9fdc2889bb2c2d7567fbbbe6ac4f042fd788c3b66afc32e83dcdf2"}
Oct 02 11:32:02 crc kubenswrapper[4783]: I1002 11:32:02.545833 4783 scope.go:117] "RemoveContainer" containerID="5f41be414b60ced507542f945d0dbbae4870dae2d8ee2c71db9491f9f0b17cfa"
Oct 02 11:32:02 crc kubenswrapper[4783]: E1002 11:32:02.546313 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 11:32:03 crc kubenswrapper[4783]: I1002 11:32:03.162216 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf" event={"ID":"3fa822ea-7474-41b4-8203-6089f1eb37cc","Type":"ContainerStarted","Data":"b0ff347e7aaa0527a5eef1a706d4b225c310e89e1e15985b563dfe0605c9c2ad"}
Oct 02 11:32:03 crc kubenswrapper[4783]: I1002 11:32:03.188235 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf" podStartSLOduration=2.001676862 podStartE2EDuration="2.188217524s" podCreationTimestamp="2025-10-02 11:32:01 +0000 UTC" firstStartedPulling="2025-10-02 11:32:02.074783049 +0000 UTC m=+2355.390977310" lastFinishedPulling="2025-10-02 11:32:02.261323711 +0000 UTC m=+2355.577517972" observedRunningTime="2025-10-02 11:32:03.182633003 +0000 UTC m=+2356.498827264" watchObservedRunningTime="2025-10-02 11:32:03.188217524 +0000 UTC m=+2356.504411785"
Oct 02 11:32:13 crc kubenswrapper[4783]: I1002 11:32:13.545370 4783 scope.go:117] "RemoveContainer" containerID="5f41be414b60ced507542f945d0dbbae4870dae2d8ee2c71db9491f9f0b17cfa"
Oct 02 11:32:13 crc kubenswrapper[4783]: E1002 11:32:13.546205 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting
failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:32:25 crc kubenswrapper[4783]: I1002 11:32:25.544980 4783 scope.go:117] "RemoveContainer" containerID="5f41be414b60ced507542f945d0dbbae4870dae2d8ee2c71db9491f9f0b17cfa" Oct 02 11:32:25 crc kubenswrapper[4783]: E1002 11:32:25.546617 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:32:39 crc kubenswrapper[4783]: I1002 11:32:39.545468 4783 scope.go:117] "RemoveContainer" containerID="5f41be414b60ced507542f945d0dbbae4870dae2d8ee2c71db9491f9f0b17cfa" Oct 02 11:32:39 crc kubenswrapper[4783]: E1002 11:32:39.546304 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:32:40 crc kubenswrapper[4783]: I1002 11:32:40.476389 4783 generic.go:334] "Generic (PLEG): container finished" podID="3fa822ea-7474-41b4-8203-6089f1eb37cc" containerID="b0ff347e7aaa0527a5eef1a706d4b225c310e89e1e15985b563dfe0605c9c2ad" exitCode=0 Oct 02 11:32:40 crc kubenswrapper[4783]: I1002 11:32:40.476451 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf" event={"ID":"3fa822ea-7474-41b4-8203-6089f1eb37cc","Type":"ContainerDied","Data":"b0ff347e7aaa0527a5eef1a706d4b225c310e89e1e15985b563dfe0605c9c2ad"} Oct 02 11:32:41 crc kubenswrapper[4783]: I1002 11:32:41.871853 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf" Oct 02 11:32:41 crc kubenswrapper[4783]: I1002 11:32:41.907153 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-neutron-metadata-combined-ca-bundle\") pod \"3fa822ea-7474-41b4-8203-6089f1eb37cc\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " Oct 02 11:32:41 crc kubenswrapper[4783]: I1002 11:32:41.907527 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-repo-setup-combined-ca-bundle\") pod \"3fa822ea-7474-41b4-8203-6089f1eb37cc\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " Oct 02 11:32:41 crc kubenswrapper[4783]: I1002 11:32:41.907656 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-ovn-combined-ca-bundle\") pod \"3fa822ea-7474-41b4-8203-6089f1eb37cc\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " Oct 02 11:32:41 crc kubenswrapper[4783]: I1002 11:32:41.907737 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3fa822ea-7474-41b4-8203-6089f1eb37cc-openstack-edpm-ipam-neutron-metadata-default-certs-0\") pod \"3fa822ea-7474-41b4-8203-6089f1eb37cc\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " Oct 02 11:32:41 crc kubenswrapper[4783]: I1002 11:32:41.907857 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-bootstrap-combined-ca-bundle\") pod \"3fa822ea-7474-41b4-8203-6089f1eb37cc\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " Oct 02 11:32:41 crc kubenswrapper[4783]: I1002 11:32:41.907951 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-nova-combined-ca-bundle\") pod \"3fa822ea-7474-41b4-8203-6089f1eb37cc\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " Oct 02 11:32:41 crc kubenswrapper[4783]: I1002 11:32:41.908059 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3fa822ea-7474-41b4-8203-6089f1eb37cc-openstack-edpm-ipam-libvirt-default-certs-0\") pod \"3fa822ea-7474-41b4-8203-6089f1eb37cc\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " Oct 02 11:32:41 crc kubenswrapper[4783]: I1002 11:32:41.908127 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3fa822ea-7474-41b4-8203-6089f1eb37cc-openstack-edpm-ipam-telemetry-default-certs-0\") pod \"3fa822ea-7474-41b4-8203-6089f1eb37cc\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " Oct 02 11:32:41 crc kubenswrapper[4783]: I1002 11:32:41.908203 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c574w\" (UniqueName: \"kubernetes.io/projected/3fa822ea-7474-41b4-8203-6089f1eb37cc-kube-api-access-c574w\") pod 
\"3fa822ea-7474-41b4-8203-6089f1eb37cc\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " Oct 02 11:32:41 crc kubenswrapper[4783]: I1002 11:32:41.908290 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-inventory\") pod \"3fa822ea-7474-41b4-8203-6089f1eb37cc\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " Oct 02 11:32:41 crc kubenswrapper[4783]: I1002 11:32:41.908373 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-ssh-key\") pod \"3fa822ea-7474-41b4-8203-6089f1eb37cc\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " Oct 02 11:32:41 crc kubenswrapper[4783]: I1002 11:32:41.908507 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3fa822ea-7474-41b4-8203-6089f1eb37cc-openstack-edpm-ipam-ovn-default-certs-0\") pod \"3fa822ea-7474-41b4-8203-6089f1eb37cc\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " Oct 02 11:32:41 crc kubenswrapper[4783]: I1002 11:32:41.909107 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-telemetry-combined-ca-bundle\") pod \"3fa822ea-7474-41b4-8203-6089f1eb37cc\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " Oct 02 11:32:41 crc kubenswrapper[4783]: I1002 11:32:41.909154 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-libvirt-combined-ca-bundle\") pod \"3fa822ea-7474-41b4-8203-6089f1eb37cc\" (UID: \"3fa822ea-7474-41b4-8203-6089f1eb37cc\") " Oct 02 11:32:41 crc kubenswrapper[4783]: I1002 11:32:41.914238 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "3fa822ea-7474-41b4-8203-6089f1eb37cc" (UID: "3fa822ea-7474-41b4-8203-6089f1eb37cc"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:32:41 crc kubenswrapper[4783]: I1002 11:32:41.919266 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "3fa822ea-7474-41b4-8203-6089f1eb37cc" (UID: "3fa822ea-7474-41b4-8203-6089f1eb37cc"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:32:41 crc kubenswrapper[4783]: I1002 11:32:41.919557 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3fa822ea-7474-41b4-8203-6089f1eb37cc-openstack-edpm-ipam-neutron-metadata-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-neutron-metadata-default-certs-0") pod "3fa822ea-7474-41b4-8203-6089f1eb37cc" (UID: "3fa822ea-7474-41b4-8203-6089f1eb37cc"). InnerVolumeSpecName "openstack-edpm-ipam-neutron-metadata-default-certs-0". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:32:41 crc kubenswrapper[4783]: I1002 11:32:41.920759 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-bootstrap-combined-ca-bundle" (OuterVolumeSpecName: "bootstrap-combined-ca-bundle") pod "3fa822ea-7474-41b4-8203-6089f1eb37cc" (UID: "3fa822ea-7474-41b4-8203-6089f1eb37cc"). InnerVolumeSpecName "bootstrap-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:32:41 crc kubenswrapper[4783]: I1002 11:32:41.920983 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-repo-setup-combined-ca-bundle" (OuterVolumeSpecName: "repo-setup-combined-ca-bundle") pod "3fa822ea-7474-41b4-8203-6089f1eb37cc" (UID: "3fa822ea-7474-41b4-8203-6089f1eb37cc"). InnerVolumeSpecName "repo-setup-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:32:41 crc kubenswrapper[4783]: I1002 11:32:41.922088 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3fa822ea-7474-41b4-8203-6089f1eb37cc-kube-api-access-c574w" (OuterVolumeSpecName: "kube-api-access-c574w") pod "3fa822ea-7474-41b4-8203-6089f1eb37cc" (UID: "3fa822ea-7474-41b4-8203-6089f1eb37cc"). InnerVolumeSpecName "kube-api-access-c574w". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:32:41 crc kubenswrapper[4783]: I1002 11:32:41.924194 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3fa822ea-7474-41b4-8203-6089f1eb37cc-openstack-edpm-ipam-ovn-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-ovn-default-certs-0") pod "3fa822ea-7474-41b4-8203-6089f1eb37cc" (UID: "3fa822ea-7474-41b4-8203-6089f1eb37cc"). InnerVolumeSpecName "openstack-edpm-ipam-ovn-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:32:41 crc kubenswrapper[4783]: I1002 11:32:41.924798 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-telemetry-combined-ca-bundle" (OuterVolumeSpecName: "telemetry-combined-ca-bundle") pod "3fa822ea-7474-41b4-8203-6089f1eb37cc" (UID: "3fa822ea-7474-41b4-8203-6089f1eb37cc"). InnerVolumeSpecName "telemetry-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:32:41 crc kubenswrapper[4783]: I1002 11:32:41.926775 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3fa822ea-7474-41b4-8203-6089f1eb37cc-openstack-edpm-ipam-telemetry-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-telemetry-default-certs-0") pod "3fa822ea-7474-41b4-8203-6089f1eb37cc" (UID: "3fa822ea-7474-41b4-8203-6089f1eb37cc"). InnerVolumeSpecName "openstack-edpm-ipam-telemetry-default-certs-0". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:32:41 crc kubenswrapper[4783]: I1002 11:32:41.927125 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3fa822ea-7474-41b4-8203-6089f1eb37cc-openstack-edpm-ipam-libvirt-default-certs-0" (OuterVolumeSpecName: "openstack-edpm-ipam-libvirt-default-certs-0") pod "3fa822ea-7474-41b4-8203-6089f1eb37cc" (UID: "3fa822ea-7474-41b4-8203-6089f1eb37cc"). InnerVolumeSpecName "openstack-edpm-ipam-libvirt-default-certs-0". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:32:41 crc kubenswrapper[4783]: I1002 11:32:41.928661 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "3fa822ea-7474-41b4-8203-6089f1eb37cc" (UID: "3fa822ea-7474-41b4-8203-6089f1eb37cc"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:32:41 crc kubenswrapper[4783]: I1002 11:32:41.928694 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "3fa822ea-7474-41b4-8203-6089f1eb37cc" (UID: "3fa822ea-7474-41b4-8203-6089f1eb37cc"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:32:41 crc kubenswrapper[4783]: I1002 11:32:41.951267 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "3fa822ea-7474-41b4-8203-6089f1eb37cc" (UID: "3fa822ea-7474-41b4-8203-6089f1eb37cc"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:32:41 crc kubenswrapper[4783]: I1002 11:32:41.955624 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-inventory" (OuterVolumeSpecName: "inventory") pod "3fa822ea-7474-41b4-8203-6089f1eb37cc" (UID: "3fa822ea-7474-41b4-8203-6089f1eb37cc"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:32:42 crc kubenswrapper[4783]: I1002 11:32:42.012359 4783 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 11:32:42 crc kubenswrapper[4783]: I1002 11:32:42.012619 4783 reconciler_common.go:293] "Volume detached for volume \"repo-setup-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-repo-setup-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 11:32:42 crc kubenswrapper[4783]: I1002 11:32:42.012706 4783 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 11:32:42 crc kubenswrapper[4783]: I1002 11:32:42.012789 4783 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-neutron-metadata-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3fa822ea-7474-41b4-8203-6089f1eb37cc-openstack-edpm-ipam-neutron-metadata-default-certs-0\") on node \"crc\" DevicePath \"\"" Oct 02 11:32:42 crc kubenswrapper[4783]: I1002 11:32:42.012912 4783 reconciler_common.go:293] "Volume detached for volume \"bootstrap-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-bootstrap-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 11:32:42 crc kubenswrapper[4783]: I1002 11:32:42.012998 4783 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 11:32:42 crc kubenswrapper[4783]: I1002 11:32:42.013086 4783 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-libvirt-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3fa822ea-7474-41b4-8203-6089f1eb37cc-openstack-edpm-ipam-libvirt-default-certs-0\") on node \"crc\" DevicePath \"\"" Oct 02 11:32:42 crc kubenswrapper[4783]: I1002 11:32:42.013166 4783 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-telemetry-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3fa822ea-7474-41b4-8203-6089f1eb37cc-openstack-edpm-ipam-telemetry-default-certs-0\") on node \"crc\" DevicePath \"\"" Oct 02 11:32:42 crc kubenswrapper[4783]: I1002 11:32:42.013248 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c574w\" (UniqueName: \"kubernetes.io/projected/3fa822ea-7474-41b4-8203-6089f1eb37cc-kube-api-access-c574w\") on node \"crc\" DevicePath \"\"" Oct 02 11:32:42 crc kubenswrapper[4783]: I1002 11:32:42.013342 4783 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-inventory\") on node \"crc\" DevicePath \"\"" Oct 02 11:32:42 crc kubenswrapper[4783]: I1002 11:32:42.013464 4783 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 02 11:32:42 crc kubenswrapper[4783]: I1002 11:32:42.013561 4783 reconciler_common.go:293] "Volume detached for volume \"openstack-edpm-ipam-ovn-default-certs-0\" (UniqueName: \"kubernetes.io/projected/3fa822ea-7474-41b4-8203-6089f1eb37cc-openstack-edpm-ipam-ovn-default-certs-0\") on node \"crc\" DevicePath \"\"" Oct 02 11:32:42 crc kubenswrapper[4783]: I1002 11:32:42.013654 4783 reconciler_common.go:293] "Volume detached for volume \"telemetry-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-telemetry-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 11:32:42 crc kubenswrapper[4783]: I1002 11:32:42.013743 4783 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3fa822ea-7474-41b4-8203-6089f1eb37cc-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 11:32:42 crc kubenswrapper[4783]: I1002 11:32:42.496544 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf" event={"ID":"3fa822ea-7474-41b4-8203-6089f1eb37cc","Type":"ContainerDied","Data":"9a0cc9523d9fdc2889bb2c2d7567fbbbe6ac4f042fd788c3b66afc32e83dcdf2"} Oct 02 11:32:42 crc kubenswrapper[4783]: I1002 11:32:42.496583 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="9a0cc9523d9fdc2889bb2c2d7567fbbbe6ac4f042fd788c3b66afc32e83dcdf2" Oct 02 11:32:42 crc kubenswrapper[4783]: I1002 11:32:42.496623 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/install-certs-edpm-deployment-openstack-edpm-ipam-f26lf" Oct 02 11:32:42 crc kubenswrapper[4783]: I1002 11:32:42.603939 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-8npzc"] Oct 02 11:32:42 crc kubenswrapper[4783]: E1002 11:32:42.605444 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3fa822ea-7474-41b4-8203-6089f1eb37cc" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Oct 02 11:32:42 crc kubenswrapper[4783]: I1002 11:32:42.605466 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="3fa822ea-7474-41b4-8203-6089f1eb37cc" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Oct 02 11:32:42 crc kubenswrapper[4783]: I1002 11:32:42.605688 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="3fa822ea-7474-41b4-8203-6089f1eb37cc" containerName="install-certs-edpm-deployment-openstack-edpm-ipam" Oct 02 11:32:42 crc kubenswrapper[4783]: I1002 11:32:42.606291 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8npzc" Oct 02 11:32:42 crc kubenswrapper[4783]: I1002 11:32:42.610652 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 02 11:32:42 crc kubenswrapper[4783]: I1002 11:32:42.610745 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-n5lmz" Oct 02 11:32:42 crc kubenswrapper[4783]: I1002 11:32:42.611118 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-config" Oct 02 11:32:42 crc kubenswrapper[4783]: I1002 11:32:42.611968 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 02 11:32:42 crc kubenswrapper[4783]: I1002 11:32:42.612337 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 02 11:32:42 crc kubenswrapper[4783]: I1002 11:32:42.614689 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-8npzc"] Oct 02 11:32:42 crc kubenswrapper[4783]: I1002 11:32:42.725444 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ch425\" (UniqueName: \"kubernetes.io/projected/caba4df9-1229-497f-8dcd-07434b0c9664-kube-api-access-ch425\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-8npzc\" (UID: \"caba4df9-1229-497f-8dcd-07434b0c9664\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8npzc" Oct 02 11:32:42 crc kubenswrapper[4783]: I1002 11:32:42.725719 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/caba4df9-1229-497f-8dcd-07434b0c9664-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-8npzc\" (UID: \"caba4df9-1229-497f-8dcd-07434b0c9664\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8npzc" Oct 02 11:32:42 crc kubenswrapper[4783]: I1002 11:32:42.725836 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/caba4df9-1229-497f-8dcd-07434b0c9664-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-8npzc\" (UID: \"caba4df9-1229-497f-8dcd-07434b0c9664\") " 
pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8npzc" Oct 02 11:32:42 crc kubenswrapper[4783]: I1002 11:32:42.726002 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/caba4df9-1229-497f-8dcd-07434b0c9664-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-8npzc\" (UID: \"caba4df9-1229-497f-8dcd-07434b0c9664\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8npzc" Oct 02 11:32:42 crc kubenswrapper[4783]: I1002 11:32:42.726151 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/caba4df9-1229-497f-8dcd-07434b0c9664-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-8npzc\" (UID: \"caba4df9-1229-497f-8dcd-07434b0c9664\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8npzc" Oct 02 11:32:42 crc kubenswrapper[4783]: I1002 11:32:42.827695 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/caba4df9-1229-497f-8dcd-07434b0c9664-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-8npzc\" (UID: \"caba4df9-1229-497f-8dcd-07434b0c9664\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8npzc" Oct 02 11:32:42 crc kubenswrapper[4783]: I1002 11:32:42.827830 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ch425\" (UniqueName: \"kubernetes.io/projected/caba4df9-1229-497f-8dcd-07434b0c9664-kube-api-access-ch425\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-8npzc\" (UID: \"caba4df9-1229-497f-8dcd-07434b0c9664\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8npzc" Oct 02 11:32:42 crc kubenswrapper[4783]: I1002 11:32:42.827880 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/caba4df9-1229-497f-8dcd-07434b0c9664-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-8npzc\" (UID: \"caba4df9-1229-497f-8dcd-07434b0c9664\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8npzc" Oct 02 11:32:42 crc kubenswrapper[4783]: I1002 11:32:42.827908 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/caba4df9-1229-497f-8dcd-07434b0c9664-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-8npzc\" (UID: \"caba4df9-1229-497f-8dcd-07434b0c9664\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8npzc" Oct 02 11:32:42 crc kubenswrapper[4783]: I1002 11:32:42.828404 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/caba4df9-1229-497f-8dcd-07434b0c9664-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-8npzc\" (UID: \"caba4df9-1229-497f-8dcd-07434b0c9664\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8npzc" Oct 02 11:32:42 crc kubenswrapper[4783]: I1002 11:32:42.829848 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/caba4df9-1229-497f-8dcd-07434b0c9664-ovncontroller-config-0\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-8npzc\" (UID: \"caba4df9-1229-497f-8dcd-07434b0c9664\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8npzc" Oct 02 11:32:42 crc kubenswrapper[4783]: I1002 
11:32:42.843276 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/caba4df9-1229-497f-8dcd-07434b0c9664-ssh-key\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-8npzc\" (UID: \"caba4df9-1229-497f-8dcd-07434b0c9664\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8npzc" Oct 02 11:32:42 crc kubenswrapper[4783]: I1002 11:32:42.845024 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/caba4df9-1229-497f-8dcd-07434b0c9664-ovn-combined-ca-bundle\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-8npzc\" (UID: \"caba4df9-1229-497f-8dcd-07434b0c9664\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8npzc" Oct 02 11:32:42 crc kubenswrapper[4783]: I1002 11:32:42.845168 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/caba4df9-1229-497f-8dcd-07434b0c9664-inventory\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-8npzc\" (UID: \"caba4df9-1229-497f-8dcd-07434b0c9664\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8npzc" Oct 02 11:32:42 crc kubenswrapper[4783]: I1002 11:32:42.846238 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ch425\" (UniqueName: \"kubernetes.io/projected/caba4df9-1229-497f-8dcd-07434b0c9664-kube-api-access-ch425\") pod \"ovn-edpm-deployment-openstack-edpm-ipam-8npzc\" (UID: \"caba4df9-1229-497f-8dcd-07434b0c9664\") " pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8npzc" Oct 02 11:32:42 crc kubenswrapper[4783]: I1002 11:32:42.925038 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8npzc" Oct 02 11:32:43 crc kubenswrapper[4783]: I1002 11:32:43.430244 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-edpm-deployment-openstack-edpm-ipam-8npzc"] Oct 02 11:32:43 crc kubenswrapper[4783]: I1002 11:32:43.505529 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8npzc" event={"ID":"caba4df9-1229-497f-8dcd-07434b0c9664","Type":"ContainerStarted","Data":"85432b5a4f06ce07ac9484de03df6b1b5a69ef9cbd17b1f7bc2f556a956d5e35"} Oct 02 11:32:44 crc kubenswrapper[4783]: I1002 11:32:44.514168 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8npzc" event={"ID":"caba4df9-1229-497f-8dcd-07434b0c9664","Type":"ContainerStarted","Data":"562231842b5fa48de8e0cf6edd44b0b44dca11b62a550f4f8abe97eb186a4316"} Oct 02 11:32:44 crc kubenswrapper[4783]: I1002 11:32:44.537510 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8npzc" podStartSLOduration=2.393048289 podStartE2EDuration="2.537492539s" podCreationTimestamp="2025-10-02 11:32:42 +0000 UTC" firstStartedPulling="2025-10-02 11:32:43.442546596 +0000 UTC m=+2396.758740867" lastFinishedPulling="2025-10-02 11:32:43.586990856 +0000 UTC m=+2396.903185117" observedRunningTime="2025-10-02 11:32:44.533680108 +0000 UTC m=+2397.849874369" watchObservedRunningTime="2025-10-02 11:32:44.537492539 +0000 UTC m=+2397.853686800" Oct 02 11:32:54 crc kubenswrapper[4783]: I1002 11:32:54.545650 4783 scope.go:117] "RemoveContainer" containerID="5f41be414b60ced507542f945d0dbbae4870dae2d8ee2c71db9491f9f0b17cfa" Oct 02 11:32:54 crc kubenswrapper[4783]: E1002 11:32:54.546311 4783 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:33:09 crc kubenswrapper[4783]: I1002 11:33:09.545301 4783 scope.go:117] "RemoveContainer" containerID="5f41be414b60ced507542f945d0dbbae4870dae2d8ee2c71db9491f9f0b17cfa" Oct 02 11:33:09 crc kubenswrapper[4783]: E1002 11:33:09.546140 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:33:20 crc kubenswrapper[4783]: I1002 11:33:20.544918 4783 scope.go:117] "RemoveContainer" containerID="5f41be414b60ced507542f945d0dbbae4870dae2d8ee2c71db9491f9f0b17cfa" Oct 02 11:33:20 crc kubenswrapper[4783]: E1002 11:33:20.545759 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:33:33 crc kubenswrapper[4783]: I1002 11:33:33.544930 4783 scope.go:117] "RemoveContainer" containerID="5f41be414b60ced507542f945d0dbbae4870dae2d8ee2c71db9491f9f0b17cfa" Oct 02 11:33:33 crc kubenswrapper[4783]: E1002 11:33:33.545886 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:33:43 crc kubenswrapper[4783]: I1002 11:33:43.004349 4783 generic.go:334] "Generic (PLEG): container finished" podID="caba4df9-1229-497f-8dcd-07434b0c9664" containerID="562231842b5fa48de8e0cf6edd44b0b44dca11b62a550f4f8abe97eb186a4316" exitCode=0 Oct 02 11:33:43 crc kubenswrapper[4783]: I1002 11:33:43.004448 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8npzc" event={"ID":"caba4df9-1229-497f-8dcd-07434b0c9664","Type":"ContainerDied","Data":"562231842b5fa48de8e0cf6edd44b0b44dca11b62a550f4f8abe97eb186a4316"} Oct 02 11:33:44 crc kubenswrapper[4783]: I1002 11:33:44.482677 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8npzc" Oct 02 11:33:44 crc kubenswrapper[4783]: I1002 11:33:44.501378 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/caba4df9-1229-497f-8dcd-07434b0c9664-ssh-key\") pod \"caba4df9-1229-497f-8dcd-07434b0c9664\" (UID: \"caba4df9-1229-497f-8dcd-07434b0c9664\") " Oct 02 11:33:44 crc kubenswrapper[4783]: I1002 11:33:44.501666 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ch425\" (UniqueName: \"kubernetes.io/projected/caba4df9-1229-497f-8dcd-07434b0c9664-kube-api-access-ch425\") pod \"caba4df9-1229-497f-8dcd-07434b0c9664\" (UID: \"caba4df9-1229-497f-8dcd-07434b0c9664\") " Oct 02 11:33:44 crc kubenswrapper[4783]: I1002 11:33:44.501776 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/caba4df9-1229-497f-8dcd-07434b0c9664-ovncontroller-config-0\") pod \"caba4df9-1229-497f-8dcd-07434b0c9664\" (UID: \"caba4df9-1229-497f-8dcd-07434b0c9664\") " Oct 02 11:33:44 crc kubenswrapper[4783]: I1002 11:33:44.501914 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/caba4df9-1229-497f-8dcd-07434b0c9664-ovn-combined-ca-bundle\") pod \"caba4df9-1229-497f-8dcd-07434b0c9664\" (UID: \"caba4df9-1229-497f-8dcd-07434b0c9664\") " Oct 02 11:33:44 crc kubenswrapper[4783]: I1002 11:33:44.502024 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/caba4df9-1229-497f-8dcd-07434b0c9664-inventory\") pod \"caba4df9-1229-497f-8dcd-07434b0c9664\" (UID: \"caba4df9-1229-497f-8dcd-07434b0c9664\") " Oct 02 11:33:44 crc kubenswrapper[4783]: I1002 11:33:44.508256 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/caba4df9-1229-497f-8dcd-07434b0c9664-ovn-combined-ca-bundle" (OuterVolumeSpecName: "ovn-combined-ca-bundle") pod "caba4df9-1229-497f-8dcd-07434b0c9664" (UID: "caba4df9-1229-497f-8dcd-07434b0c9664"). InnerVolumeSpecName "ovn-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:33:44 crc kubenswrapper[4783]: I1002 11:33:44.530191 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/caba4df9-1229-497f-8dcd-07434b0c9664-ovncontroller-config-0" (OuterVolumeSpecName: "ovncontroller-config-0") pod "caba4df9-1229-497f-8dcd-07434b0c9664" (UID: "caba4df9-1229-497f-8dcd-07434b0c9664"). InnerVolumeSpecName "ovncontroller-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:33:44 crc kubenswrapper[4783]: I1002 11:33:44.530826 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/caba4df9-1229-497f-8dcd-07434b0c9664-kube-api-access-ch425" (OuterVolumeSpecName: "kube-api-access-ch425") pod "caba4df9-1229-497f-8dcd-07434b0c9664" (UID: "caba4df9-1229-497f-8dcd-07434b0c9664"). InnerVolumeSpecName "kube-api-access-ch425". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:33:44 crc kubenswrapper[4783]: I1002 11:33:44.553481 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/caba4df9-1229-497f-8dcd-07434b0c9664-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "caba4df9-1229-497f-8dcd-07434b0c9664" (UID: "caba4df9-1229-497f-8dcd-07434b0c9664"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:33:44 crc kubenswrapper[4783]: I1002 11:33:44.561625 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/caba4df9-1229-497f-8dcd-07434b0c9664-inventory" (OuterVolumeSpecName: "inventory") pod "caba4df9-1229-497f-8dcd-07434b0c9664" (UID: "caba4df9-1229-497f-8dcd-07434b0c9664"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:33:44 crc kubenswrapper[4783]: I1002 11:33:44.604358 4783 reconciler_common.go:293] "Volume detached for volume \"ovn-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/caba4df9-1229-497f-8dcd-07434b0c9664-ovn-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 11:33:44 crc kubenswrapper[4783]: I1002 11:33:44.604633 4783 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/caba4df9-1229-497f-8dcd-07434b0c9664-inventory\") on node \"crc\" DevicePath \"\"" Oct 02 11:33:44 crc kubenswrapper[4783]: I1002 11:33:44.604721 4783 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/caba4df9-1229-497f-8dcd-07434b0c9664-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 02 11:33:44 crc kubenswrapper[4783]: I1002 11:33:44.604801 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ch425\" (UniqueName: \"kubernetes.io/projected/caba4df9-1229-497f-8dcd-07434b0c9664-kube-api-access-ch425\") on node \"crc\" DevicePath \"\"" Oct 02 11:33:44 crc kubenswrapper[4783]: I1002 11:33:44.604887 4783 reconciler_common.go:293] "Volume detached for volume \"ovncontroller-config-0\" (UniqueName: \"kubernetes.io/configmap/caba4df9-1229-497f-8dcd-07434b0c9664-ovncontroller-config-0\") on node \"crc\" DevicePath \"\"" Oct 02 11:33:45 crc kubenswrapper[4783]: I1002 11:33:45.021003 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8npzc" event={"ID":"caba4df9-1229-497f-8dcd-07434b0c9664","Type":"ContainerDied","Data":"85432b5a4f06ce07ac9484de03df6b1b5a69ef9cbd17b1f7bc2f556a956d5e35"} Oct 02 11:33:45 crc kubenswrapper[4783]: I1002 11:33:45.021042 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="85432b5a4f06ce07ac9484de03df6b1b5a69ef9cbd17b1f7bc2f556a956d5e35" Oct 02 11:33:45 crc kubenswrapper[4783]: I1002 11:33:45.021097 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-edpm-deployment-openstack-edpm-ipam-8npzc" Oct 02 11:33:45 crc kubenswrapper[4783]: I1002 11:33:45.122876 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-pftxq"] Oct 02 11:33:45 crc kubenswrapper[4783]: E1002 11:33:45.123398 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="caba4df9-1229-497f-8dcd-07434b0c9664" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Oct 02 11:33:45 crc kubenswrapper[4783]: I1002 11:33:45.123441 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="caba4df9-1229-497f-8dcd-07434b0c9664" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Oct 02 11:33:45 crc kubenswrapper[4783]: I1002 11:33:45.123656 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="caba4df9-1229-497f-8dcd-07434b0c9664" containerName="ovn-edpm-deployment-openstack-edpm-ipam" Oct 02 11:33:45 crc kubenswrapper[4783]: I1002 11:33:45.124527 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-pftxq" Oct 02 11:33:45 crc kubenswrapper[4783]: I1002 11:33:45.126932 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-ovn-metadata-agent-neutron-config" Oct 02 11:33:45 crc kubenswrapper[4783]: I1002 11:33:45.127126 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 02 11:33:45 crc kubenswrapper[4783]: I1002 11:33:45.127361 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 02 11:33:45 crc kubenswrapper[4783]: I1002 11:33:45.127542 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-neutron-config" Oct 02 11:33:45 crc kubenswrapper[4783]: I1002 11:33:45.127509 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-n5lmz" Oct 02 11:33:45 crc kubenswrapper[4783]: I1002 11:33:45.127688 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 02 11:33:45 crc kubenswrapper[4783]: I1002 11:33:45.130921 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-pftxq"] Oct 02 11:33:45 crc kubenswrapper[4783]: I1002 11:33:45.213344 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/271e221f-74f1-4a26-9c0a-1f867d5b56e4-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-pftxq\" (UID: \"271e221f-74f1-4a26-9c0a-1f867d5b56e4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-pftxq" Oct 02 11:33:45 crc kubenswrapper[4783]: I1002 11:33:45.213442 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/271e221f-74f1-4a26-9c0a-1f867d5b56e4-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-pftxq\" (UID: \"271e221f-74f1-4a26-9c0a-1f867d5b56e4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-pftxq" Oct 02 11:33:45 crc kubenswrapper[4783]: I1002 11:33:45.213478 4783 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/271e221f-74f1-4a26-9c0a-1f867d5b56e4-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-pftxq\" (UID: \"271e221f-74f1-4a26-9c0a-1f867d5b56e4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-pftxq" Oct 02 11:33:45 crc kubenswrapper[4783]: I1002 11:33:45.213499 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/271e221f-74f1-4a26-9c0a-1f867d5b56e4-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-pftxq\" (UID: \"271e221f-74f1-4a26-9c0a-1f867d5b56e4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-pftxq" Oct 02 11:33:45 crc kubenswrapper[4783]: I1002 11:33:45.213527 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ztt6p\" (UniqueName: \"kubernetes.io/projected/271e221f-74f1-4a26-9c0a-1f867d5b56e4-kube-api-access-ztt6p\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-pftxq\" (UID: \"271e221f-74f1-4a26-9c0a-1f867d5b56e4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-pftxq" Oct 02 11:33:45 crc kubenswrapper[4783]: I1002 11:33:45.213632 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/271e221f-74f1-4a26-9c0a-1f867d5b56e4-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-pftxq\" (UID: \"271e221f-74f1-4a26-9c0a-1f867d5b56e4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-pftxq" Oct 02 11:33:45 crc kubenswrapper[4783]: I1002 11:33:45.315260 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ztt6p\" (UniqueName: \"kubernetes.io/projected/271e221f-74f1-4a26-9c0a-1f867d5b56e4-kube-api-access-ztt6p\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-pftxq\" (UID: \"271e221f-74f1-4a26-9c0a-1f867d5b56e4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-pftxq" Oct 02 11:33:45 crc kubenswrapper[4783]: I1002 11:33:45.315434 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/271e221f-74f1-4a26-9c0a-1f867d5b56e4-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-pftxq\" (UID: \"271e221f-74f1-4a26-9c0a-1f867d5b56e4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-pftxq" Oct 02 11:33:45 crc kubenswrapper[4783]: I1002 11:33:45.315531 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/271e221f-74f1-4a26-9c0a-1f867d5b56e4-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-pftxq\" (UID: \"271e221f-74f1-4a26-9c0a-1f867d5b56e4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-pftxq" Oct 02 11:33:45 crc kubenswrapper[4783]: I1002 11:33:45.315596 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/271e221f-74f1-4a26-9c0a-1f867d5b56e4-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-pftxq\" 
(UID: \"271e221f-74f1-4a26-9c0a-1f867d5b56e4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-pftxq" Oct 02 11:33:45 crc kubenswrapper[4783]: I1002 11:33:45.315644 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/271e221f-74f1-4a26-9c0a-1f867d5b56e4-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-pftxq\" (UID: \"271e221f-74f1-4a26-9c0a-1f867d5b56e4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-pftxq" Oct 02 11:33:45 crc kubenswrapper[4783]: I1002 11:33:45.315679 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/271e221f-74f1-4a26-9c0a-1f867d5b56e4-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-pftxq\" (UID: \"271e221f-74f1-4a26-9c0a-1f867d5b56e4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-pftxq" Oct 02 11:33:45 crc kubenswrapper[4783]: I1002 11:33:45.320190 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/271e221f-74f1-4a26-9c0a-1f867d5b56e4-inventory\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-pftxq\" (UID: \"271e221f-74f1-4a26-9c0a-1f867d5b56e4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-pftxq" Oct 02 11:33:45 crc kubenswrapper[4783]: I1002 11:33:45.320448 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/271e221f-74f1-4a26-9c0a-1f867d5b56e4-nova-metadata-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-pftxq\" (UID: \"271e221f-74f1-4a26-9c0a-1f867d5b56e4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-pftxq" Oct 02 11:33:45 crc kubenswrapper[4783]: I1002 11:33:45.320721 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/271e221f-74f1-4a26-9c0a-1f867d5b56e4-neutron-ovn-metadata-agent-neutron-config-0\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-pftxq\" (UID: \"271e221f-74f1-4a26-9c0a-1f867d5b56e4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-pftxq" Oct 02 11:33:45 crc kubenswrapper[4783]: I1002 11:33:45.328009 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/271e221f-74f1-4a26-9c0a-1f867d5b56e4-ssh-key\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-pftxq\" (UID: \"271e221f-74f1-4a26-9c0a-1f867d5b56e4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-pftxq" Oct 02 11:33:45 crc kubenswrapper[4783]: I1002 11:33:45.329388 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/271e221f-74f1-4a26-9c0a-1f867d5b56e4-neutron-metadata-combined-ca-bundle\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-pftxq\" (UID: \"271e221f-74f1-4a26-9c0a-1f867d5b56e4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-pftxq" Oct 02 11:33:45 crc kubenswrapper[4783]: I1002 11:33:45.336748 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ztt6p\" (UniqueName: 
\"kubernetes.io/projected/271e221f-74f1-4a26-9c0a-1f867d5b56e4-kube-api-access-ztt6p\") pod \"neutron-metadata-edpm-deployment-openstack-edpm-ipam-pftxq\" (UID: \"271e221f-74f1-4a26-9c0a-1f867d5b56e4\") " pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-pftxq" Oct 02 11:33:45 crc kubenswrapper[4783]: I1002 11:33:45.442799 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-pftxq" Oct 02 11:33:45 crc kubenswrapper[4783]: I1002 11:33:45.545126 4783 scope.go:117] "RemoveContainer" containerID="5f41be414b60ced507542f945d0dbbae4870dae2d8ee2c71db9491f9f0b17cfa" Oct 02 11:33:45 crc kubenswrapper[4783]: E1002 11:33:45.545618 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:33:45 crc kubenswrapper[4783]: I1002 11:33:45.919633 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-pftxq"] Oct 02 11:33:46 crc kubenswrapper[4783]: I1002 11:33:46.029995 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-pftxq" event={"ID":"271e221f-74f1-4a26-9c0a-1f867d5b56e4","Type":"ContainerStarted","Data":"5a54a6dd168605e74b058785b5db7f6b4c13641fa9328ca36c9608f4730d5093"} Oct 02 11:33:47 crc kubenswrapper[4783]: I1002 11:33:47.040388 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-pftxq" event={"ID":"271e221f-74f1-4a26-9c0a-1f867d5b56e4","Type":"ContainerStarted","Data":"030789d890060c478da52d1ddf0047253ec02d022ce43a55bf5b107482042e3f"} Oct 02 11:33:47 crc kubenswrapper[4783]: I1002 11:33:47.063574 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-pftxq" podStartSLOduration=1.8862597700000001 podStartE2EDuration="2.063557409s" podCreationTimestamp="2025-10-02 11:33:45 +0000 UTC" firstStartedPulling="2025-10-02 11:33:45.933587861 +0000 UTC m=+2459.249782122" lastFinishedPulling="2025-10-02 11:33:46.1108855 +0000 UTC m=+2459.427079761" observedRunningTime="2025-10-02 11:33:47.057710864 +0000 UTC m=+2460.373905125" watchObservedRunningTime="2025-10-02 11:33:47.063557409 +0000 UTC m=+2460.379751670" Oct 02 11:33:56 crc kubenswrapper[4783]: I1002 11:33:56.545848 4783 scope.go:117] "RemoveContainer" containerID="5f41be414b60ced507542f945d0dbbae4870dae2d8ee2c71db9491f9f0b17cfa" Oct 02 11:33:56 crc kubenswrapper[4783]: E1002 11:33:56.547081 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:34:07 crc kubenswrapper[4783]: I1002 11:34:07.569631 4783 scope.go:117] "RemoveContainer" 
containerID="5f41be414b60ced507542f945d0dbbae4870dae2d8ee2c71db9491f9f0b17cfa" Oct 02 11:34:07 crc kubenswrapper[4783]: E1002 11:34:07.570541 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:34:19 crc kubenswrapper[4783]: I1002 11:34:19.544916 4783 scope.go:117] "RemoveContainer" containerID="5f41be414b60ced507542f945d0dbbae4870dae2d8ee2c71db9491f9f0b17cfa" Oct 02 11:34:19 crc kubenswrapper[4783]: E1002 11:34:19.546098 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:34:26 crc kubenswrapper[4783]: I1002 11:34:26.376959 4783 generic.go:334] "Generic (PLEG): container finished" podID="271e221f-74f1-4a26-9c0a-1f867d5b56e4" containerID="030789d890060c478da52d1ddf0047253ec02d022ce43a55bf5b107482042e3f" exitCode=0 Oct 02 11:34:26 crc kubenswrapper[4783]: I1002 11:34:26.377064 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-pftxq" event={"ID":"271e221f-74f1-4a26-9c0a-1f867d5b56e4","Type":"ContainerDied","Data":"030789d890060c478da52d1ddf0047253ec02d022ce43a55bf5b107482042e3f"} Oct 02 11:34:27 crc kubenswrapper[4783]: I1002 11:34:27.778182 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-pftxq" Oct 02 11:34:27 crc kubenswrapper[4783]: I1002 11:34:27.925199 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ztt6p\" (UniqueName: \"kubernetes.io/projected/271e221f-74f1-4a26-9c0a-1f867d5b56e4-kube-api-access-ztt6p\") pod \"271e221f-74f1-4a26-9c0a-1f867d5b56e4\" (UID: \"271e221f-74f1-4a26-9c0a-1f867d5b56e4\") " Oct 02 11:34:27 crc kubenswrapper[4783]: I1002 11:34:27.925274 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/271e221f-74f1-4a26-9c0a-1f867d5b56e4-neutron-ovn-metadata-agent-neutron-config-0\") pod \"271e221f-74f1-4a26-9c0a-1f867d5b56e4\" (UID: \"271e221f-74f1-4a26-9c0a-1f867d5b56e4\") " Oct 02 11:34:27 crc kubenswrapper[4783]: I1002 11:34:27.925555 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/271e221f-74f1-4a26-9c0a-1f867d5b56e4-nova-metadata-neutron-config-0\") pod \"271e221f-74f1-4a26-9c0a-1f867d5b56e4\" (UID: \"271e221f-74f1-4a26-9c0a-1f867d5b56e4\") " Oct 02 11:34:27 crc kubenswrapper[4783]: I1002 11:34:27.925743 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/271e221f-74f1-4a26-9c0a-1f867d5b56e4-neutron-metadata-combined-ca-bundle\") pod \"271e221f-74f1-4a26-9c0a-1f867d5b56e4\" (UID: \"271e221f-74f1-4a26-9c0a-1f867d5b56e4\") " Oct 02 11:34:27 crc kubenswrapper[4783]: I1002 11:34:27.926583 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/271e221f-74f1-4a26-9c0a-1f867d5b56e4-inventory\") pod \"271e221f-74f1-4a26-9c0a-1f867d5b56e4\" (UID: \"271e221f-74f1-4a26-9c0a-1f867d5b56e4\") " Oct 02 11:34:27 crc kubenswrapper[4783]: I1002 11:34:27.926619 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/271e221f-74f1-4a26-9c0a-1f867d5b56e4-ssh-key\") pod \"271e221f-74f1-4a26-9c0a-1f867d5b56e4\" (UID: \"271e221f-74f1-4a26-9c0a-1f867d5b56e4\") " Oct 02 11:34:27 crc kubenswrapper[4783]: I1002 11:34:27.931672 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/271e221f-74f1-4a26-9c0a-1f867d5b56e4-kube-api-access-ztt6p" (OuterVolumeSpecName: "kube-api-access-ztt6p") pod "271e221f-74f1-4a26-9c0a-1f867d5b56e4" (UID: "271e221f-74f1-4a26-9c0a-1f867d5b56e4"). InnerVolumeSpecName "kube-api-access-ztt6p". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:34:27 crc kubenswrapper[4783]: I1002 11:34:27.933434 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/271e221f-74f1-4a26-9c0a-1f867d5b56e4-neutron-metadata-combined-ca-bundle" (OuterVolumeSpecName: "neutron-metadata-combined-ca-bundle") pod "271e221f-74f1-4a26-9c0a-1f867d5b56e4" (UID: "271e221f-74f1-4a26-9c0a-1f867d5b56e4"). InnerVolumeSpecName "neutron-metadata-combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:34:27 crc kubenswrapper[4783]: I1002 11:34:27.956885 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/271e221f-74f1-4a26-9c0a-1f867d5b56e4-nova-metadata-neutron-config-0" (OuterVolumeSpecName: "nova-metadata-neutron-config-0") pod "271e221f-74f1-4a26-9c0a-1f867d5b56e4" (UID: "271e221f-74f1-4a26-9c0a-1f867d5b56e4"). InnerVolumeSpecName "nova-metadata-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:34:27 crc kubenswrapper[4783]: I1002 11:34:27.968037 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/271e221f-74f1-4a26-9c0a-1f867d5b56e4-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "271e221f-74f1-4a26-9c0a-1f867d5b56e4" (UID: "271e221f-74f1-4a26-9c0a-1f867d5b56e4"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:34:27 crc kubenswrapper[4783]: I1002 11:34:27.969976 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/271e221f-74f1-4a26-9c0a-1f867d5b56e4-neutron-ovn-metadata-agent-neutron-config-0" (OuterVolumeSpecName: "neutron-ovn-metadata-agent-neutron-config-0") pod "271e221f-74f1-4a26-9c0a-1f867d5b56e4" (UID: "271e221f-74f1-4a26-9c0a-1f867d5b56e4"). InnerVolumeSpecName "neutron-ovn-metadata-agent-neutron-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:34:27 crc kubenswrapper[4783]: I1002 11:34:27.977542 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/271e221f-74f1-4a26-9c0a-1f867d5b56e4-inventory" (OuterVolumeSpecName: "inventory") pod "271e221f-74f1-4a26-9c0a-1f867d5b56e4" (UID: "271e221f-74f1-4a26-9c0a-1f867d5b56e4"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:34:28 crc kubenswrapper[4783]: I1002 11:34:28.029649 4783 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/271e221f-74f1-4a26-9c0a-1f867d5b56e4-nova-metadata-neutron-config-0\") on node \"crc\" DevicePath \"\"" Oct 02 11:34:28 crc kubenswrapper[4783]: I1002 11:34:28.029685 4783 reconciler_common.go:293] "Volume detached for volume \"neutron-metadata-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/271e221f-74f1-4a26-9c0a-1f867d5b56e4-neutron-metadata-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 11:34:28 crc kubenswrapper[4783]: I1002 11:34:28.029699 4783 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/271e221f-74f1-4a26-9c0a-1f867d5b56e4-inventory\") on node \"crc\" DevicePath \"\"" Oct 02 11:34:28 crc kubenswrapper[4783]: I1002 11:34:28.029711 4783 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/271e221f-74f1-4a26-9c0a-1f867d5b56e4-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 02 11:34:28 crc kubenswrapper[4783]: I1002 11:34:28.029724 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ztt6p\" (UniqueName: \"kubernetes.io/projected/271e221f-74f1-4a26-9c0a-1f867d5b56e4-kube-api-access-ztt6p\") on node \"crc\" DevicePath \"\"" Oct 02 11:34:28 crc kubenswrapper[4783]: I1002 11:34:28.029737 4783 reconciler_common.go:293] "Volume detached for volume \"neutron-ovn-metadata-agent-neutron-config-0\" (UniqueName: \"kubernetes.io/secret/271e221f-74f1-4a26-9c0a-1f867d5b56e4-neutron-ovn-metadata-agent-neutron-config-0\") on node \"crc\" DevicePath \"\"" Oct 02 11:34:28 crc kubenswrapper[4783]: I1002 11:34:28.399522 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-pftxq" event={"ID":"271e221f-74f1-4a26-9c0a-1f867d5b56e4","Type":"ContainerDied","Data":"5a54a6dd168605e74b058785b5db7f6b4c13641fa9328ca36c9608f4730d5093"} Oct 02 11:34:28 crc kubenswrapper[4783]: I1002 11:34:28.399903 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5a54a6dd168605e74b058785b5db7f6b4c13641fa9328ca36c9608f4730d5093" Oct 02 11:34:28 crc kubenswrapper[4783]: I1002 11:34:28.399575 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-metadata-edpm-deployment-openstack-edpm-ipam-pftxq" Oct 02 11:34:28 crc kubenswrapper[4783]: I1002 11:34:28.484719 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-976df"] Oct 02 11:34:28 crc kubenswrapper[4783]: E1002 11:34:28.485160 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="271e221f-74f1-4a26-9c0a-1f867d5b56e4" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Oct 02 11:34:28 crc kubenswrapper[4783]: I1002 11:34:28.485178 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="271e221f-74f1-4a26-9c0a-1f867d5b56e4" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Oct 02 11:34:28 crc kubenswrapper[4783]: I1002 11:34:28.485368 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="271e221f-74f1-4a26-9c0a-1f867d5b56e4" containerName="neutron-metadata-edpm-deployment-openstack-edpm-ipam" Oct 02 11:34:28 crc kubenswrapper[4783]: I1002 11:34:28.485969 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-976df" Oct 02 11:34:28 crc kubenswrapper[4783]: I1002 11:34:28.489654 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 02 11:34:28 crc kubenswrapper[4783]: I1002 11:34:28.489813 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 02 11:34:28 crc kubenswrapper[4783]: I1002 11:34:28.489926 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-n5lmz" Oct 02 11:34:28 crc kubenswrapper[4783]: I1002 11:34:28.490055 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"libvirt-secret" Oct 02 11:34:28 crc kubenswrapper[4783]: I1002 11:34:28.490278 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 02 11:34:28 crc kubenswrapper[4783]: I1002 11:34:28.504043 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-976df"] Oct 02 11:34:28 crc kubenswrapper[4783]: I1002 11:34:28.642610 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9n5vf\" (UniqueName: \"kubernetes.io/projected/2399815c-c05e-4429-a4e2-163eb5893cc0-kube-api-access-9n5vf\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-976df\" (UID: \"2399815c-c05e-4429-a4e2-163eb5893cc0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-976df" Oct 02 11:34:28 crc kubenswrapper[4783]: I1002 11:34:28.642678 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2399815c-c05e-4429-a4e2-163eb5893cc0-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-976df\" (UID: \"2399815c-c05e-4429-a4e2-163eb5893cc0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-976df" Oct 02 11:34:28 crc kubenswrapper[4783]: I1002 11:34:28.642751 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/2399815c-c05e-4429-a4e2-163eb5893cc0-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-976df\" (UID: \"2399815c-c05e-4429-a4e2-163eb5893cc0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-976df" Oct 02 11:34:28 crc kubenswrapper[4783]: I1002 11:34:28.643066 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2399815c-c05e-4429-a4e2-163eb5893cc0-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-976df\" (UID: \"2399815c-c05e-4429-a4e2-163eb5893cc0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-976df" Oct 02 11:34:28 crc kubenswrapper[4783]: I1002 11:34:28.643148 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2399815c-c05e-4429-a4e2-163eb5893cc0-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-976df\" (UID: \"2399815c-c05e-4429-a4e2-163eb5893cc0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-976df" Oct 02 11:34:28 crc kubenswrapper[4783]: I1002 11:34:28.745043 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"ssh-key\" (UniqueName: \"kubernetes.io/secret/2399815c-c05e-4429-a4e2-163eb5893cc0-ssh-key\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-976df\" (UID: \"2399815c-c05e-4429-a4e2-163eb5893cc0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-976df" Oct 02 11:34:28 crc kubenswrapper[4783]: I1002 11:34:28.745107 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9n5vf\" (UniqueName: \"kubernetes.io/projected/2399815c-c05e-4429-a4e2-163eb5893cc0-kube-api-access-9n5vf\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-976df\" (UID: \"2399815c-c05e-4429-a4e2-163eb5893cc0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-976df" Oct 02 11:34:28 crc kubenswrapper[4783]: I1002 11:34:28.745143 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2399815c-c05e-4429-a4e2-163eb5893cc0-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-976df\" (UID: \"2399815c-c05e-4429-a4e2-163eb5893cc0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-976df" Oct 02 11:34:28 crc kubenswrapper[4783]: I1002 11:34:28.745263 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/2399815c-c05e-4429-a4e2-163eb5893cc0-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-976df\" (UID: \"2399815c-c05e-4429-a4e2-163eb5893cc0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-976df" Oct 02 11:34:28 crc kubenswrapper[4783]: I1002 11:34:28.745832 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2399815c-c05e-4429-a4e2-163eb5893cc0-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-976df\" (UID: \"2399815c-c05e-4429-a4e2-163eb5893cc0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-976df" Oct 02 11:34:28 crc kubenswrapper[4783]: I1002 11:34:28.748725 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/2399815c-c05e-4429-a4e2-163eb5893cc0-libvirt-secret-0\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-976df\" (UID: \"2399815c-c05e-4429-a4e2-163eb5893cc0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-976df" Oct 02 11:34:28 crc kubenswrapper[4783]: I1002 11:34:28.749683 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2399815c-c05e-4429-a4e2-163eb5893cc0-inventory\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-976df\" (UID: \"2399815c-c05e-4429-a4e2-163eb5893cc0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-976df" Oct 02 11:34:28 crc kubenswrapper[4783]: I1002 11:34:28.751761 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2399815c-c05e-4429-a4e2-163eb5893cc0-libvirt-combined-ca-bundle\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-976df\" (UID: \"2399815c-c05e-4429-a4e2-163eb5893cc0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-976df" Oct 02 11:34:28 crc kubenswrapper[4783]: I1002 11:34:28.752519 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2399815c-c05e-4429-a4e2-163eb5893cc0-ssh-key\") pod 
\"libvirt-edpm-deployment-openstack-edpm-ipam-976df\" (UID: \"2399815c-c05e-4429-a4e2-163eb5893cc0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-976df" Oct 02 11:34:28 crc kubenswrapper[4783]: I1002 11:34:28.767828 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9n5vf\" (UniqueName: \"kubernetes.io/projected/2399815c-c05e-4429-a4e2-163eb5893cc0-kube-api-access-9n5vf\") pod \"libvirt-edpm-deployment-openstack-edpm-ipam-976df\" (UID: \"2399815c-c05e-4429-a4e2-163eb5893cc0\") " pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-976df" Oct 02 11:34:28 crc kubenswrapper[4783]: I1002 11:34:28.808136 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-976df" Oct 02 11:34:29 crc kubenswrapper[4783]: I1002 11:34:29.369479 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/libvirt-edpm-deployment-openstack-edpm-ipam-976df"] Oct 02 11:34:29 crc kubenswrapper[4783]: I1002 11:34:29.410057 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-976df" event={"ID":"2399815c-c05e-4429-a4e2-163eb5893cc0","Type":"ContainerStarted","Data":"19f7b4333e118ab88b9ff56df6f2ceaffc7aea3ae359dd2843fdfbea55275cfb"} Oct 02 11:34:30 crc kubenswrapper[4783]: I1002 11:34:30.421504 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-976df" event={"ID":"2399815c-c05e-4429-a4e2-163eb5893cc0","Type":"ContainerStarted","Data":"cc26e9422d62e4fea59e11e75a13f3205412183c0be44e0aa56066875147e228"} Oct 02 11:34:30 crc kubenswrapper[4783]: I1002 11:34:30.447802 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-976df" podStartSLOduration=2.259113835 podStartE2EDuration="2.447779096s" podCreationTimestamp="2025-10-02 11:34:28 +0000 UTC" firstStartedPulling="2025-10-02 11:34:29.384246485 +0000 UTC m=+2502.700440756" lastFinishedPulling="2025-10-02 11:34:29.572911756 +0000 UTC m=+2502.889106017" observedRunningTime="2025-10-02 11:34:30.446892753 +0000 UTC m=+2503.763087014" watchObservedRunningTime="2025-10-02 11:34:30.447779096 +0000 UTC m=+2503.763973357" Oct 02 11:34:30 crc kubenswrapper[4783]: I1002 11:34:30.544816 4783 scope.go:117] "RemoveContainer" containerID="5f41be414b60ced507542f945d0dbbae4870dae2d8ee2c71db9491f9f0b17cfa" Oct 02 11:34:30 crc kubenswrapper[4783]: E1002 11:34:30.545130 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:34:43 crc kubenswrapper[4783]: I1002 11:34:43.545104 4783 scope.go:117] "RemoveContainer" containerID="5f41be414b60ced507542f945d0dbbae4870dae2d8ee2c71db9491f9f0b17cfa" Oct 02 11:34:43 crc kubenswrapper[4783]: E1002 11:34:43.545869 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:34:58 crc kubenswrapper[4783]: I1002 11:34:58.545524 4783 scope.go:117] "RemoveContainer" containerID="5f41be414b60ced507542f945d0dbbae4870dae2d8ee2c71db9491f9f0b17cfa" Oct 02 11:34:58 crc kubenswrapper[4783]: E1002 11:34:58.546382 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:35:11 crc kubenswrapper[4783]: I1002 11:35:11.545220 4783 scope.go:117] "RemoveContainer" containerID="5f41be414b60ced507542f945d0dbbae4870dae2d8ee2c71db9491f9f0b17cfa" Oct 02 11:35:11 crc kubenswrapper[4783]: E1002 11:35:11.546057 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:35:26 crc kubenswrapper[4783]: I1002 11:35:26.545507 4783 scope.go:117] "RemoveContainer" containerID="5f41be414b60ced507542f945d0dbbae4870dae2d8ee2c71db9491f9f0b17cfa" Oct 02 11:35:26 crc kubenswrapper[4783]: I1002 11:35:26.937932 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" event={"ID":"3288cc82-59a8-408e-8b0e-b5255882b4fb","Type":"ContainerStarted","Data":"ef440e982275f435833cad6571cb19a94a65f2aa9dda0af7e8553bbcd9f7b29c"} Oct 02 11:37:51 crc kubenswrapper[4783]: I1002 11:37:51.514176 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 02 11:37:51 crc kubenswrapper[4783]: I1002 11:37:51.514802 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 02 11:38:21 crc kubenswrapper[4783]: I1002 11:38:21.514268 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 02 11:38:21 crc kubenswrapper[4783]: I1002 11:38:21.514838 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 02 11:38:51 crc kubenswrapper[4783]: I1002 11:38:51.513766 4783 
patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 02 11:38:51 crc kubenswrapper[4783]: I1002 11:38:51.514394 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 02 11:38:51 crc kubenswrapper[4783]: I1002 11:38:51.514468 4783 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" Oct 02 11:38:51 crc kubenswrapper[4783]: I1002 11:38:51.515079 4783 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"ef440e982275f435833cad6571cb19a94a65f2aa9dda0af7e8553bbcd9f7b29c"} pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 02 11:38:51 crc kubenswrapper[4783]: I1002 11:38:51.515139 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" containerID="cri-o://ef440e982275f435833cad6571cb19a94a65f2aa9dda0af7e8553bbcd9f7b29c" gracePeriod=600 Oct 02 11:38:51 crc kubenswrapper[4783]: I1002 11:38:51.746932 4783 generic.go:334] "Generic (PLEG): container finished" podID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerID="ef440e982275f435833cad6571cb19a94a65f2aa9dda0af7e8553bbcd9f7b29c" exitCode=0 Oct 02 11:38:51 crc kubenswrapper[4783]: I1002 11:38:51.747326 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" event={"ID":"3288cc82-59a8-408e-8b0e-b5255882b4fb","Type":"ContainerDied","Data":"ef440e982275f435833cad6571cb19a94a65f2aa9dda0af7e8553bbcd9f7b29c"} Oct 02 11:38:51 crc kubenswrapper[4783]: I1002 11:38:51.747365 4783 scope.go:117] "RemoveContainer" containerID="5f41be414b60ced507542f945d0dbbae4870dae2d8ee2c71db9491f9f0b17cfa" Oct 02 11:38:52 crc kubenswrapper[4783]: I1002 11:38:52.758900 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" event={"ID":"3288cc82-59a8-408e-8b0e-b5255882b4fb","Type":"ContainerStarted","Data":"16e46e78a08cfb5a1610f0efb729bfc7f610a79babf01966444bacf916b3acb9"} Oct 02 11:39:01 crc kubenswrapper[4783]: I1002 11:39:01.825578 4783 generic.go:334] "Generic (PLEG): container finished" podID="2399815c-c05e-4429-a4e2-163eb5893cc0" containerID="cc26e9422d62e4fea59e11e75a13f3205412183c0be44e0aa56066875147e228" exitCode=0 Oct 02 11:39:01 crc kubenswrapper[4783]: I1002 11:39:01.825715 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-976df" event={"ID":"2399815c-c05e-4429-a4e2-163eb5893cc0","Type":"ContainerDied","Data":"cc26e9422d62e4fea59e11e75a13f3205412183c0be44e0aa56066875147e228"} Oct 02 11:39:03 crc kubenswrapper[4783]: I1002 11:39:03.240510 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-976df" Oct 02 11:39:03 crc kubenswrapper[4783]: I1002 11:39:03.324519 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2399815c-c05e-4429-a4e2-163eb5893cc0-libvirt-combined-ca-bundle\") pod \"2399815c-c05e-4429-a4e2-163eb5893cc0\" (UID: \"2399815c-c05e-4429-a4e2-163eb5893cc0\") " Oct 02 11:39:03 crc kubenswrapper[4783]: I1002 11:39:03.324582 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2399815c-c05e-4429-a4e2-163eb5893cc0-ssh-key\") pod \"2399815c-c05e-4429-a4e2-163eb5893cc0\" (UID: \"2399815c-c05e-4429-a4e2-163eb5893cc0\") " Oct 02 11:39:03 crc kubenswrapper[4783]: I1002 11:39:03.324710 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2399815c-c05e-4429-a4e2-163eb5893cc0-inventory\") pod \"2399815c-c05e-4429-a4e2-163eb5893cc0\" (UID: \"2399815c-c05e-4429-a4e2-163eb5893cc0\") " Oct 02 11:39:03 crc kubenswrapper[4783]: I1002 11:39:03.324785 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/2399815c-c05e-4429-a4e2-163eb5893cc0-libvirt-secret-0\") pod \"2399815c-c05e-4429-a4e2-163eb5893cc0\" (UID: \"2399815c-c05e-4429-a4e2-163eb5893cc0\") " Oct 02 11:39:03 crc kubenswrapper[4783]: I1002 11:39:03.324843 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9n5vf\" (UniqueName: \"kubernetes.io/projected/2399815c-c05e-4429-a4e2-163eb5893cc0-kube-api-access-9n5vf\") pod \"2399815c-c05e-4429-a4e2-163eb5893cc0\" (UID: \"2399815c-c05e-4429-a4e2-163eb5893cc0\") " Oct 02 11:39:03 crc kubenswrapper[4783]: I1002 11:39:03.330763 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2399815c-c05e-4429-a4e2-163eb5893cc0-libvirt-combined-ca-bundle" (OuterVolumeSpecName: "libvirt-combined-ca-bundle") pod "2399815c-c05e-4429-a4e2-163eb5893cc0" (UID: "2399815c-c05e-4429-a4e2-163eb5893cc0"). InnerVolumeSpecName "libvirt-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:39:03 crc kubenswrapper[4783]: I1002 11:39:03.334687 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2399815c-c05e-4429-a4e2-163eb5893cc0-kube-api-access-9n5vf" (OuterVolumeSpecName: "kube-api-access-9n5vf") pod "2399815c-c05e-4429-a4e2-163eb5893cc0" (UID: "2399815c-c05e-4429-a4e2-163eb5893cc0"). InnerVolumeSpecName "kube-api-access-9n5vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:39:03 crc kubenswrapper[4783]: I1002 11:39:03.355305 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2399815c-c05e-4429-a4e2-163eb5893cc0-libvirt-secret-0" (OuterVolumeSpecName: "libvirt-secret-0") pod "2399815c-c05e-4429-a4e2-163eb5893cc0" (UID: "2399815c-c05e-4429-a4e2-163eb5893cc0"). InnerVolumeSpecName "libvirt-secret-0". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:39:03 crc kubenswrapper[4783]: I1002 11:39:03.357450 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2399815c-c05e-4429-a4e2-163eb5893cc0-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "2399815c-c05e-4429-a4e2-163eb5893cc0" (UID: "2399815c-c05e-4429-a4e2-163eb5893cc0"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:39:03 crc kubenswrapper[4783]: I1002 11:39:03.368000 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2399815c-c05e-4429-a4e2-163eb5893cc0-inventory" (OuterVolumeSpecName: "inventory") pod "2399815c-c05e-4429-a4e2-163eb5893cc0" (UID: "2399815c-c05e-4429-a4e2-163eb5893cc0"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:39:03 crc kubenswrapper[4783]: I1002 11:39:03.426628 4783 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2399815c-c05e-4429-a4e2-163eb5893cc0-inventory\") on node \"crc\" DevicePath \"\"" Oct 02 11:39:03 crc kubenswrapper[4783]: I1002 11:39:03.426658 4783 reconciler_common.go:293] "Volume detached for volume \"libvirt-secret-0\" (UniqueName: \"kubernetes.io/secret/2399815c-c05e-4429-a4e2-163eb5893cc0-libvirt-secret-0\") on node \"crc\" DevicePath \"\"" Oct 02 11:39:03 crc kubenswrapper[4783]: I1002 11:39:03.426668 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9n5vf\" (UniqueName: \"kubernetes.io/projected/2399815c-c05e-4429-a4e2-163eb5893cc0-kube-api-access-9n5vf\") on node \"crc\" DevicePath \"\"" Oct 02 11:39:03 crc kubenswrapper[4783]: I1002 11:39:03.426676 4783 reconciler_common.go:293] "Volume detached for volume \"libvirt-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2399815c-c05e-4429-a4e2-163eb5893cc0-libvirt-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 11:39:03 crc kubenswrapper[4783]: I1002 11:39:03.426685 4783 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2399815c-c05e-4429-a4e2-163eb5893cc0-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 02 11:39:03 crc kubenswrapper[4783]: I1002 11:39:03.850603 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-976df" event={"ID":"2399815c-c05e-4429-a4e2-163eb5893cc0","Type":"ContainerDied","Data":"19f7b4333e118ab88b9ff56df6f2ceaffc7aea3ae359dd2843fdfbea55275cfb"} Oct 02 11:39:03 crc kubenswrapper[4783]: I1002 11:39:03.851006 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="19f7b4333e118ab88b9ff56df6f2ceaffc7aea3ae359dd2843fdfbea55275cfb" Oct 02 11:39:03 crc kubenswrapper[4783]: I1002 11:39:03.850726 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/libvirt-edpm-deployment-openstack-edpm-ipam-976df" Oct 02 11:39:03 crc kubenswrapper[4783]: I1002 11:39:03.985922 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-xp2l6"] Oct 02 11:39:03 crc kubenswrapper[4783]: E1002 11:39:03.986307 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2399815c-c05e-4429-a4e2-163eb5893cc0" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Oct 02 11:39:03 crc kubenswrapper[4783]: I1002 11:39:03.986331 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="2399815c-c05e-4429-a4e2-163eb5893cc0" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Oct 02 11:39:03 crc kubenswrapper[4783]: I1002 11:39:03.986573 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="2399815c-c05e-4429-a4e2-163eb5893cc0" containerName="libvirt-edpm-deployment-openstack-edpm-ipam" Oct 02 11:39:03 crc kubenswrapper[4783]: I1002 11:39:03.987217 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-xp2l6" Oct 02 11:39:03 crc kubenswrapper[4783]: I1002 11:39:03.989122 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 02 11:39:03 crc kubenswrapper[4783]: I1002 11:39:03.989331 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key" Oct 02 11:39:03 crc kubenswrapper[4783]: I1002 11:39:03.989570 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 02 11:39:03 crc kubenswrapper[4783]: I1002 11:39:03.990603 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-extra-config" Oct 02 11:39:03 crc kubenswrapper[4783]: I1002 11:39:03.991101 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-n5lmz" Oct 02 11:39:03 crc kubenswrapper[4783]: I1002 11:39:03.991668 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 02 11:39:03 crc kubenswrapper[4783]: I1002 11:39:03.992363 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config" Oct 02 11:39:04 crc kubenswrapper[4783]: I1002 11:39:04.017407 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-xp2l6"] Oct 02 11:39:04 crc kubenswrapper[4783]: I1002 11:39:04.037191 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/7f8d39f6-ae36-44ac-87ce-da7542ace825-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-xp2l6\" (UID: \"7f8d39f6-ae36-44ac-87ce-da7542ace825\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-xp2l6" Oct 02 11:39:04 crc kubenswrapper[4783]: I1002 11:39:04.037244 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/7f8d39f6-ae36-44ac-87ce-da7542ace825-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-xp2l6\" (UID: \"7f8d39f6-ae36-44ac-87ce-da7542ace825\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-xp2l6" Oct 02 11:39:04 crc kubenswrapper[4783]: I1002 11:39:04.037304 4783 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7f8d39f6-ae36-44ac-87ce-da7542ace825-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-xp2l6\" (UID: \"7f8d39f6-ae36-44ac-87ce-da7542ace825\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-xp2l6" Oct 02 11:39:04 crc kubenswrapper[4783]: I1002 11:39:04.037428 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rs8vk\" (UniqueName: \"kubernetes.io/projected/7f8d39f6-ae36-44ac-87ce-da7542ace825-kube-api-access-rs8vk\") pod \"nova-edpm-deployment-openstack-edpm-ipam-xp2l6\" (UID: \"7f8d39f6-ae36-44ac-87ce-da7542ace825\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-xp2l6" Oct 02 11:39:04 crc kubenswrapper[4783]: I1002 11:39:04.037482 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7f8d39f6-ae36-44ac-87ce-da7542ace825-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-xp2l6\" (UID: \"7f8d39f6-ae36-44ac-87ce-da7542ace825\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-xp2l6" Oct 02 11:39:04 crc kubenswrapper[4783]: I1002 11:39:04.037533 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/7f8d39f6-ae36-44ac-87ce-da7542ace825-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-xp2l6\" (UID: \"7f8d39f6-ae36-44ac-87ce-da7542ace825\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-xp2l6" Oct 02 11:39:04 crc kubenswrapper[4783]: I1002 11:39:04.037580 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/7f8d39f6-ae36-44ac-87ce-da7542ace825-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-xp2l6\" (UID: \"7f8d39f6-ae36-44ac-87ce-da7542ace825\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-xp2l6" Oct 02 11:39:04 crc kubenswrapper[4783]: I1002 11:39:04.037642 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/7f8d39f6-ae36-44ac-87ce-da7542ace825-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-xp2l6\" (UID: \"7f8d39f6-ae36-44ac-87ce-da7542ace825\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-xp2l6" Oct 02 11:39:04 crc kubenswrapper[4783]: I1002 11:39:04.037682 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7f8d39f6-ae36-44ac-87ce-da7542ace825-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-xp2l6\" (UID: \"7f8d39f6-ae36-44ac-87ce-da7542ace825\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-xp2l6" Oct 02 11:39:04 crc kubenswrapper[4783]: I1002 11:39:04.139336 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/7f8d39f6-ae36-44ac-87ce-da7542ace825-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-xp2l6\" (UID: \"7f8d39f6-ae36-44ac-87ce-da7542ace825\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-xp2l6" Oct 02 11:39:04 
crc kubenswrapper[4783]: I1002 11:39:04.139406 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/7f8d39f6-ae36-44ac-87ce-da7542ace825-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-xp2l6\" (UID: \"7f8d39f6-ae36-44ac-87ce-da7542ace825\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-xp2l6" Oct 02 11:39:04 crc kubenswrapper[4783]: I1002 11:39:04.139473 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7f8d39f6-ae36-44ac-87ce-da7542ace825-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-xp2l6\" (UID: \"7f8d39f6-ae36-44ac-87ce-da7542ace825\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-xp2l6" Oct 02 11:39:04 crc kubenswrapper[4783]: I1002 11:39:04.139608 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rs8vk\" (UniqueName: \"kubernetes.io/projected/7f8d39f6-ae36-44ac-87ce-da7542ace825-kube-api-access-rs8vk\") pod \"nova-edpm-deployment-openstack-edpm-ipam-xp2l6\" (UID: \"7f8d39f6-ae36-44ac-87ce-da7542ace825\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-xp2l6" Oct 02 11:39:04 crc kubenswrapper[4783]: I1002 11:39:04.139668 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7f8d39f6-ae36-44ac-87ce-da7542ace825-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-xp2l6\" (UID: \"7f8d39f6-ae36-44ac-87ce-da7542ace825\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-xp2l6" Oct 02 11:39:04 crc kubenswrapper[4783]: I1002 11:39:04.139719 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/7f8d39f6-ae36-44ac-87ce-da7542ace825-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-xp2l6\" (UID: \"7f8d39f6-ae36-44ac-87ce-da7542ace825\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-xp2l6" Oct 02 11:39:04 crc kubenswrapper[4783]: I1002 11:39:04.139789 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/7f8d39f6-ae36-44ac-87ce-da7542ace825-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-xp2l6\" (UID: \"7f8d39f6-ae36-44ac-87ce-da7542ace825\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-xp2l6" Oct 02 11:39:04 crc kubenswrapper[4783]: I1002 11:39:04.139874 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/7f8d39f6-ae36-44ac-87ce-da7542ace825-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-xp2l6\" (UID: \"7f8d39f6-ae36-44ac-87ce-da7542ace825\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-xp2l6" Oct 02 11:39:04 crc kubenswrapper[4783]: I1002 11:39:04.140338 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7f8d39f6-ae36-44ac-87ce-da7542ace825-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-xp2l6\" (UID: \"7f8d39f6-ae36-44ac-87ce-da7542ace825\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-xp2l6" Oct 02 11:39:04 crc kubenswrapper[4783]: I1002 11:39:04.140981 4783 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/7f8d39f6-ae36-44ac-87ce-da7542ace825-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-xp2l6\" (UID: \"7f8d39f6-ae36-44ac-87ce-da7542ace825\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-xp2l6" Oct 02 11:39:04 crc kubenswrapper[4783]: I1002 11:39:04.143166 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/7f8d39f6-ae36-44ac-87ce-da7542ace825-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-xp2l6\" (UID: \"7f8d39f6-ae36-44ac-87ce-da7542ace825\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-xp2l6" Oct 02 11:39:04 crc kubenswrapper[4783]: I1002 11:39:04.143337 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7f8d39f6-ae36-44ac-87ce-da7542ace825-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-xp2l6\" (UID: \"7f8d39f6-ae36-44ac-87ce-da7542ace825\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-xp2l6" Oct 02 11:39:04 crc kubenswrapper[4783]: I1002 11:39:04.144304 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7f8d39f6-ae36-44ac-87ce-da7542ace825-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-xp2l6\" (UID: \"7f8d39f6-ae36-44ac-87ce-da7542ace825\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-xp2l6" Oct 02 11:39:04 crc kubenswrapper[4783]: I1002 11:39:04.147246 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/7f8d39f6-ae36-44ac-87ce-da7542ace825-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-xp2l6\" (UID: \"7f8d39f6-ae36-44ac-87ce-da7542ace825\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-xp2l6" Oct 02 11:39:04 crc kubenswrapper[4783]: I1002 11:39:04.147936 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/7f8d39f6-ae36-44ac-87ce-da7542ace825-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-xp2l6\" (UID: \"7f8d39f6-ae36-44ac-87ce-da7542ace825\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-xp2l6" Oct 02 11:39:04 crc kubenswrapper[4783]: I1002 11:39:04.159200 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7f8d39f6-ae36-44ac-87ce-da7542ace825-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-xp2l6\" (UID: \"7f8d39f6-ae36-44ac-87ce-da7542ace825\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-xp2l6" Oct 02 11:39:04 crc kubenswrapper[4783]: I1002 11:39:04.163142 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/7f8d39f6-ae36-44ac-87ce-da7542ace825-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-xp2l6\" (UID: \"7f8d39f6-ae36-44ac-87ce-da7542ace825\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-xp2l6" Oct 02 11:39:04 crc kubenswrapper[4783]: I1002 11:39:04.186251 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rs8vk\" (UniqueName: 
\"kubernetes.io/projected/7f8d39f6-ae36-44ac-87ce-da7542ace825-kube-api-access-rs8vk\") pod \"nova-edpm-deployment-openstack-edpm-ipam-xp2l6\" (UID: \"7f8d39f6-ae36-44ac-87ce-da7542ace825\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-xp2l6" Oct 02 11:39:04 crc kubenswrapper[4783]: I1002 11:39:04.306651 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-xp2l6" Oct 02 11:39:04 crc kubenswrapper[4783]: I1002 11:39:04.860731 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-xp2l6"] Oct 02 11:39:04 crc kubenswrapper[4783]: I1002 11:39:04.874226 4783 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 02 11:39:05 crc kubenswrapper[4783]: I1002 11:39:05.874610 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-xp2l6" event={"ID":"7f8d39f6-ae36-44ac-87ce-da7542ace825","Type":"ContainerStarted","Data":"5e20a05649a7a8cf2e209b5e8a0f39caeae24a5485117341ef2bd60af5b0c32d"} Oct 02 11:39:05 crc kubenswrapper[4783]: I1002 11:39:05.874989 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-xp2l6" event={"ID":"7f8d39f6-ae36-44ac-87ce-da7542ace825","Type":"ContainerStarted","Data":"7371cd1a1944674ef82ded4c620401b3c144a8620dab4c876de70f559db3b5ab"} Oct 02 11:39:05 crc kubenswrapper[4783]: I1002 11:39:05.897590 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-xp2l6" podStartSLOduration=2.685530398 podStartE2EDuration="2.897574026s" podCreationTimestamp="2025-10-02 11:39:03 +0000 UTC" firstStartedPulling="2025-10-02 11:39:04.874001962 +0000 UTC m=+2778.190196223" lastFinishedPulling="2025-10-02 11:39:05.08604559 +0000 UTC m=+2778.402239851" observedRunningTime="2025-10-02 11:39:05.894368701 +0000 UTC m=+2779.210562962" watchObservedRunningTime="2025-10-02 11:39:05.897574026 +0000 UTC m=+2779.213768287" Oct 02 11:39:34 crc kubenswrapper[4783]: I1002 11:39:34.470213 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-j7k4q"] Oct 02 11:39:34 crc kubenswrapper[4783]: I1002 11:39:34.473677 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-j7k4q" Oct 02 11:39:34 crc kubenswrapper[4783]: I1002 11:39:34.484935 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-j7k4q"] Oct 02 11:39:34 crc kubenswrapper[4783]: I1002 11:39:34.533640 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sjq7q\" (UniqueName: \"kubernetes.io/projected/29a87f3e-4167-42e2-95fa-91cd28df9e05-kube-api-access-sjq7q\") pod \"redhat-operators-j7k4q\" (UID: \"29a87f3e-4167-42e2-95fa-91cd28df9e05\") " pod="openshift-marketplace/redhat-operators-j7k4q" Oct 02 11:39:34 crc kubenswrapper[4783]: I1002 11:39:34.533750 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29a87f3e-4167-42e2-95fa-91cd28df9e05-catalog-content\") pod \"redhat-operators-j7k4q\" (UID: \"29a87f3e-4167-42e2-95fa-91cd28df9e05\") " pod="openshift-marketplace/redhat-operators-j7k4q" Oct 02 11:39:34 crc kubenswrapper[4783]: I1002 11:39:34.533790 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29a87f3e-4167-42e2-95fa-91cd28df9e05-utilities\") pod \"redhat-operators-j7k4q\" (UID: \"29a87f3e-4167-42e2-95fa-91cd28df9e05\") " pod="openshift-marketplace/redhat-operators-j7k4q" Oct 02 11:39:34 crc kubenswrapper[4783]: I1002 11:39:34.635547 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29a87f3e-4167-42e2-95fa-91cd28df9e05-catalog-content\") pod \"redhat-operators-j7k4q\" (UID: \"29a87f3e-4167-42e2-95fa-91cd28df9e05\") " pod="openshift-marketplace/redhat-operators-j7k4q" Oct 02 11:39:34 crc kubenswrapper[4783]: I1002 11:39:34.635621 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29a87f3e-4167-42e2-95fa-91cd28df9e05-utilities\") pod \"redhat-operators-j7k4q\" (UID: \"29a87f3e-4167-42e2-95fa-91cd28df9e05\") " pod="openshift-marketplace/redhat-operators-j7k4q" Oct 02 11:39:34 crc kubenswrapper[4783]: I1002 11:39:34.635849 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sjq7q\" (UniqueName: \"kubernetes.io/projected/29a87f3e-4167-42e2-95fa-91cd28df9e05-kube-api-access-sjq7q\") pod \"redhat-operators-j7k4q\" (UID: \"29a87f3e-4167-42e2-95fa-91cd28df9e05\") " pod="openshift-marketplace/redhat-operators-j7k4q" Oct 02 11:39:34 crc kubenswrapper[4783]: I1002 11:39:34.636646 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29a87f3e-4167-42e2-95fa-91cd28df9e05-catalog-content\") pod \"redhat-operators-j7k4q\" (UID: \"29a87f3e-4167-42e2-95fa-91cd28df9e05\") " pod="openshift-marketplace/redhat-operators-j7k4q" Oct 02 11:39:34 crc kubenswrapper[4783]: I1002 11:39:34.636999 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29a87f3e-4167-42e2-95fa-91cd28df9e05-utilities\") pod \"redhat-operators-j7k4q\" (UID: \"29a87f3e-4167-42e2-95fa-91cd28df9e05\") " pod="openshift-marketplace/redhat-operators-j7k4q" Oct 02 11:39:34 crc kubenswrapper[4783]: I1002 11:39:34.659265 4783 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-marketplace/community-operators-wfw7q"] Oct 02 11:39:34 crc kubenswrapper[4783]: I1002 11:39:34.661275 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-wfw7q" Oct 02 11:39:34 crc kubenswrapper[4783]: I1002 11:39:34.676006 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sjq7q\" (UniqueName: \"kubernetes.io/projected/29a87f3e-4167-42e2-95fa-91cd28df9e05-kube-api-access-sjq7q\") pod \"redhat-operators-j7k4q\" (UID: \"29a87f3e-4167-42e2-95fa-91cd28df9e05\") " pod="openshift-marketplace/redhat-operators-j7k4q" Oct 02 11:39:34 crc kubenswrapper[4783]: I1002 11:39:34.718465 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wfw7q"] Oct 02 11:39:34 crc kubenswrapper[4783]: I1002 11:39:34.738703 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4692d534-cd1c-4ee0-97ba-4edd45d26f51-catalog-content\") pod \"community-operators-wfw7q\" (UID: \"4692d534-cd1c-4ee0-97ba-4edd45d26f51\") " pod="openshift-marketplace/community-operators-wfw7q" Oct 02 11:39:34 crc kubenswrapper[4783]: I1002 11:39:34.738770 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4692d534-cd1c-4ee0-97ba-4edd45d26f51-utilities\") pod \"community-operators-wfw7q\" (UID: \"4692d534-cd1c-4ee0-97ba-4edd45d26f51\") " pod="openshift-marketplace/community-operators-wfw7q" Oct 02 11:39:34 crc kubenswrapper[4783]: I1002 11:39:34.738858 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dzhg7\" (UniqueName: \"kubernetes.io/projected/4692d534-cd1c-4ee0-97ba-4edd45d26f51-kube-api-access-dzhg7\") pod \"community-operators-wfw7q\" (UID: \"4692d534-cd1c-4ee0-97ba-4edd45d26f51\") " pod="openshift-marketplace/community-operators-wfw7q" Oct 02 11:39:34 crc kubenswrapper[4783]: I1002 11:39:34.794824 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-j7k4q" Oct 02 11:39:34 crc kubenswrapper[4783]: I1002 11:39:34.842626 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4692d534-cd1c-4ee0-97ba-4edd45d26f51-utilities\") pod \"community-operators-wfw7q\" (UID: \"4692d534-cd1c-4ee0-97ba-4edd45d26f51\") " pod="openshift-marketplace/community-operators-wfw7q" Oct 02 11:39:34 crc kubenswrapper[4783]: I1002 11:39:34.843187 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dzhg7\" (UniqueName: \"kubernetes.io/projected/4692d534-cd1c-4ee0-97ba-4edd45d26f51-kube-api-access-dzhg7\") pod \"community-operators-wfw7q\" (UID: \"4692d534-cd1c-4ee0-97ba-4edd45d26f51\") " pod="openshift-marketplace/community-operators-wfw7q" Oct 02 11:39:34 crc kubenswrapper[4783]: I1002 11:39:34.843329 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4692d534-cd1c-4ee0-97ba-4edd45d26f51-utilities\") pod \"community-operators-wfw7q\" (UID: \"4692d534-cd1c-4ee0-97ba-4edd45d26f51\") " pod="openshift-marketplace/community-operators-wfw7q" Oct 02 11:39:34 crc kubenswrapper[4783]: I1002 11:39:34.843719 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4692d534-cd1c-4ee0-97ba-4edd45d26f51-catalog-content\") pod \"community-operators-wfw7q\" (UID: \"4692d534-cd1c-4ee0-97ba-4edd45d26f51\") " pod="openshift-marketplace/community-operators-wfw7q" Oct 02 11:39:34 crc kubenswrapper[4783]: I1002 11:39:34.844424 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4692d534-cd1c-4ee0-97ba-4edd45d26f51-catalog-content\") pod \"community-operators-wfw7q\" (UID: \"4692d534-cd1c-4ee0-97ba-4edd45d26f51\") " pod="openshift-marketplace/community-operators-wfw7q" Oct 02 11:39:34 crc kubenswrapper[4783]: I1002 11:39:34.870290 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dzhg7\" (UniqueName: \"kubernetes.io/projected/4692d534-cd1c-4ee0-97ba-4edd45d26f51-kube-api-access-dzhg7\") pod \"community-operators-wfw7q\" (UID: \"4692d534-cd1c-4ee0-97ba-4edd45d26f51\") " pod="openshift-marketplace/community-operators-wfw7q" Oct 02 11:39:35 crc kubenswrapper[4783]: I1002 11:39:35.038481 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-wfw7q" Oct 02 11:39:35 crc kubenswrapper[4783]: I1002 11:39:35.188819 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-j7k4q"] Oct 02 11:39:35 crc kubenswrapper[4783]: I1002 11:39:35.576590 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-wfw7q"] Oct 02 11:39:36 crc kubenswrapper[4783]: I1002 11:39:36.136875 4783 generic.go:334] "Generic (PLEG): container finished" podID="4692d534-cd1c-4ee0-97ba-4edd45d26f51" containerID="478a4359b83913c538424eb4033f34fcb624fe4b2d656698e9a35c63618021e6" exitCode=0 Oct 02 11:39:36 crc kubenswrapper[4783]: I1002 11:39:36.136939 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wfw7q" event={"ID":"4692d534-cd1c-4ee0-97ba-4edd45d26f51","Type":"ContainerDied","Data":"478a4359b83913c538424eb4033f34fcb624fe4b2d656698e9a35c63618021e6"} Oct 02 11:39:36 crc kubenswrapper[4783]: I1002 11:39:36.136969 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wfw7q" event={"ID":"4692d534-cd1c-4ee0-97ba-4edd45d26f51","Type":"ContainerStarted","Data":"46ccf0e62388bb3ce909959762a5652789ceb9039712c9fadcbf7e7cd683cbaa"} Oct 02 11:39:36 crc kubenswrapper[4783]: I1002 11:39:36.139185 4783 generic.go:334] "Generic (PLEG): container finished" podID="29a87f3e-4167-42e2-95fa-91cd28df9e05" containerID="5b88e9344dc4f5384f4e53043ca088ee7e5a8e4ddfc5990b457a2267209b2540" exitCode=0 Oct 02 11:39:36 crc kubenswrapper[4783]: I1002 11:39:36.139210 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j7k4q" event={"ID":"29a87f3e-4167-42e2-95fa-91cd28df9e05","Type":"ContainerDied","Data":"5b88e9344dc4f5384f4e53043ca088ee7e5a8e4ddfc5990b457a2267209b2540"} Oct 02 11:39:36 crc kubenswrapper[4783]: I1002 11:39:36.139227 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j7k4q" event={"ID":"29a87f3e-4167-42e2-95fa-91cd28df9e05","Type":"ContainerStarted","Data":"4895a8ef413a3e4dabc7239eb6de70eede7da60a5646e433651d1583b89f583d"} Oct 02 11:39:37 crc kubenswrapper[4783]: I1002 11:39:37.062161 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-dprk2"] Oct 02 11:39:37 crc kubenswrapper[4783]: I1002 11:39:37.064874 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-dprk2" Oct 02 11:39:37 crc kubenswrapper[4783]: I1002 11:39:37.079122 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dprk2"] Oct 02 11:39:37 crc kubenswrapper[4783]: I1002 11:39:37.100212 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2b0fc9d5-91fa-465e-8cf1-107248e61ea1-catalog-content\") pod \"certified-operators-dprk2\" (UID: \"2b0fc9d5-91fa-465e-8cf1-107248e61ea1\") " pod="openshift-marketplace/certified-operators-dprk2" Oct 02 11:39:37 crc kubenswrapper[4783]: I1002 11:39:37.100356 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2b0fc9d5-91fa-465e-8cf1-107248e61ea1-utilities\") pod \"certified-operators-dprk2\" (UID: \"2b0fc9d5-91fa-465e-8cf1-107248e61ea1\") " pod="openshift-marketplace/certified-operators-dprk2" Oct 02 11:39:37 crc kubenswrapper[4783]: I1002 11:39:37.100545 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kxfzh\" (UniqueName: \"kubernetes.io/projected/2b0fc9d5-91fa-465e-8cf1-107248e61ea1-kube-api-access-kxfzh\") pod \"certified-operators-dprk2\" (UID: \"2b0fc9d5-91fa-465e-8cf1-107248e61ea1\") " pod="openshift-marketplace/certified-operators-dprk2" Oct 02 11:39:37 crc kubenswrapper[4783]: I1002 11:39:37.160516 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wfw7q" event={"ID":"4692d534-cd1c-4ee0-97ba-4edd45d26f51","Type":"ContainerStarted","Data":"2edaa754fe36c8794e84e2451d30523fa2f8cf9775b2b7349a2a48617369309b"} Oct 02 11:39:37 crc kubenswrapper[4783]: I1002 11:39:37.206294 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kxfzh\" (UniqueName: \"kubernetes.io/projected/2b0fc9d5-91fa-465e-8cf1-107248e61ea1-kube-api-access-kxfzh\") pod \"certified-operators-dprk2\" (UID: \"2b0fc9d5-91fa-465e-8cf1-107248e61ea1\") " pod="openshift-marketplace/certified-operators-dprk2" Oct 02 11:39:37 crc kubenswrapper[4783]: I1002 11:39:37.206476 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2b0fc9d5-91fa-465e-8cf1-107248e61ea1-catalog-content\") pod \"certified-operators-dprk2\" (UID: \"2b0fc9d5-91fa-465e-8cf1-107248e61ea1\") " pod="openshift-marketplace/certified-operators-dprk2" Oct 02 11:39:37 crc kubenswrapper[4783]: I1002 11:39:37.206541 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2b0fc9d5-91fa-465e-8cf1-107248e61ea1-utilities\") pod \"certified-operators-dprk2\" (UID: \"2b0fc9d5-91fa-465e-8cf1-107248e61ea1\") " pod="openshift-marketplace/certified-operators-dprk2" Oct 02 11:39:37 crc kubenswrapper[4783]: I1002 11:39:37.207231 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2b0fc9d5-91fa-465e-8cf1-107248e61ea1-catalog-content\") pod \"certified-operators-dprk2\" (UID: \"2b0fc9d5-91fa-465e-8cf1-107248e61ea1\") " pod="openshift-marketplace/certified-operators-dprk2" Oct 02 11:39:37 crc kubenswrapper[4783]: I1002 11:39:37.207863 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" 
(UniqueName: \"kubernetes.io/empty-dir/2b0fc9d5-91fa-465e-8cf1-107248e61ea1-utilities\") pod \"certified-operators-dprk2\" (UID: \"2b0fc9d5-91fa-465e-8cf1-107248e61ea1\") " pod="openshift-marketplace/certified-operators-dprk2" Oct 02 11:39:37 crc kubenswrapper[4783]: I1002 11:39:37.228030 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kxfzh\" (UniqueName: \"kubernetes.io/projected/2b0fc9d5-91fa-465e-8cf1-107248e61ea1-kube-api-access-kxfzh\") pod \"certified-operators-dprk2\" (UID: \"2b0fc9d5-91fa-465e-8cf1-107248e61ea1\") " pod="openshift-marketplace/certified-operators-dprk2" Oct 02 11:39:37 crc kubenswrapper[4783]: I1002 11:39:37.398929 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-dprk2" Oct 02 11:39:37 crc kubenswrapper[4783]: I1002 11:39:37.691608 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-l4qhd"] Oct 02 11:39:37 crc kubenswrapper[4783]: I1002 11:39:37.706822 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l4qhd" Oct 02 11:39:37 crc kubenswrapper[4783]: I1002 11:39:37.748333 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-l4qhd"] Oct 02 11:39:37 crc kubenswrapper[4783]: I1002 11:39:37.820445 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f43840a-f4c5-42df-a160-b931a2348c7a-utilities\") pod \"redhat-marketplace-l4qhd\" (UID: \"8f43840a-f4c5-42df-a160-b931a2348c7a\") " pod="openshift-marketplace/redhat-marketplace-l4qhd" Oct 02 11:39:37 crc kubenswrapper[4783]: I1002 11:39:37.820588 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l7rv7\" (UniqueName: \"kubernetes.io/projected/8f43840a-f4c5-42df-a160-b931a2348c7a-kube-api-access-l7rv7\") pod \"redhat-marketplace-l4qhd\" (UID: \"8f43840a-f4c5-42df-a160-b931a2348c7a\") " pod="openshift-marketplace/redhat-marketplace-l4qhd" Oct 02 11:39:37 crc kubenswrapper[4783]: I1002 11:39:37.820648 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8f43840a-f4c5-42df-a160-b931a2348c7a-catalog-content\") pod \"redhat-marketplace-l4qhd\" (UID: \"8f43840a-f4c5-42df-a160-b931a2348c7a\") " pod="openshift-marketplace/redhat-marketplace-l4qhd" Oct 02 11:39:37 crc kubenswrapper[4783]: I1002 11:39:37.849277 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-dprk2"] Oct 02 11:39:37 crc kubenswrapper[4783]: I1002 11:39:37.922544 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8f43840a-f4c5-42df-a160-b931a2348c7a-catalog-content\") pod \"redhat-marketplace-l4qhd\" (UID: \"8f43840a-f4c5-42df-a160-b931a2348c7a\") " pod="openshift-marketplace/redhat-marketplace-l4qhd" Oct 02 11:39:37 crc kubenswrapper[4783]: I1002 11:39:37.922698 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f43840a-f4c5-42df-a160-b931a2348c7a-utilities\") pod \"redhat-marketplace-l4qhd\" (UID: \"8f43840a-f4c5-42df-a160-b931a2348c7a\") " pod="openshift-marketplace/redhat-marketplace-l4qhd" Oct 02 
11:39:37 crc kubenswrapper[4783]: I1002 11:39:37.922774 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l7rv7\" (UniqueName: \"kubernetes.io/projected/8f43840a-f4c5-42df-a160-b931a2348c7a-kube-api-access-l7rv7\") pod \"redhat-marketplace-l4qhd\" (UID: \"8f43840a-f4c5-42df-a160-b931a2348c7a\") " pod="openshift-marketplace/redhat-marketplace-l4qhd" Oct 02 11:39:37 crc kubenswrapper[4783]: I1002 11:39:37.923625 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8f43840a-f4c5-42df-a160-b931a2348c7a-catalog-content\") pod \"redhat-marketplace-l4qhd\" (UID: \"8f43840a-f4c5-42df-a160-b931a2348c7a\") " pod="openshift-marketplace/redhat-marketplace-l4qhd" Oct 02 11:39:37 crc kubenswrapper[4783]: I1002 11:39:37.923907 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f43840a-f4c5-42df-a160-b931a2348c7a-utilities\") pod \"redhat-marketplace-l4qhd\" (UID: \"8f43840a-f4c5-42df-a160-b931a2348c7a\") " pod="openshift-marketplace/redhat-marketplace-l4qhd" Oct 02 11:39:37 crc kubenswrapper[4783]: I1002 11:39:37.963068 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l7rv7\" (UniqueName: \"kubernetes.io/projected/8f43840a-f4c5-42df-a160-b931a2348c7a-kube-api-access-l7rv7\") pod \"redhat-marketplace-l4qhd\" (UID: \"8f43840a-f4c5-42df-a160-b931a2348c7a\") " pod="openshift-marketplace/redhat-marketplace-l4qhd" Oct 02 11:39:38 crc kubenswrapper[4783]: I1002 11:39:38.037472 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l4qhd" Oct 02 11:39:38 crc kubenswrapper[4783]: I1002 11:39:38.175397 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dprk2" event={"ID":"2b0fc9d5-91fa-465e-8cf1-107248e61ea1","Type":"ContainerStarted","Data":"3ad238e1b57169894ba6643479c3b6f25d01c2151c7c7e4afd766a07c709f09f"} Oct 02 11:39:38 crc kubenswrapper[4783]: I1002 11:39:38.184498 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j7k4q" event={"ID":"29a87f3e-4167-42e2-95fa-91cd28df9e05","Type":"ContainerStarted","Data":"a967aacf203827aabb43e6ed425722d484446139bf32dce25009f77f6bc54bab"} Oct 02 11:39:38 crc kubenswrapper[4783]: I1002 11:39:38.503316 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-l4qhd"] Oct 02 11:39:38 crc kubenswrapper[4783]: W1002 11:39:38.511974 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8f43840a_f4c5_42df_a160_b931a2348c7a.slice/crio-de0bb734e2d66cc075aa7b105de9aa2296f64c218116aa1e8527d7f0eb33c235 WatchSource:0}: Error finding container de0bb734e2d66cc075aa7b105de9aa2296f64c218116aa1e8527d7f0eb33c235: Status 404 returned error can't find the container with id de0bb734e2d66cc075aa7b105de9aa2296f64c218116aa1e8527d7f0eb33c235 Oct 02 11:39:39 crc kubenswrapper[4783]: I1002 11:39:39.195109 4783 generic.go:334] "Generic (PLEG): container finished" podID="2b0fc9d5-91fa-465e-8cf1-107248e61ea1" containerID="5c18b98f70bbfffe030f5c17af145456795a54fce3fbf28fa8e046b3cadaecd0" exitCode=0 Oct 02 11:39:39 crc kubenswrapper[4783]: I1002 11:39:39.195171 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dprk2" 
event={"ID":"2b0fc9d5-91fa-465e-8cf1-107248e61ea1","Type":"ContainerDied","Data":"5c18b98f70bbfffe030f5c17af145456795a54fce3fbf28fa8e046b3cadaecd0"} Oct 02 11:39:39 crc kubenswrapper[4783]: I1002 11:39:39.197186 4783 generic.go:334] "Generic (PLEG): container finished" podID="8f43840a-f4c5-42df-a160-b931a2348c7a" containerID="434cf08ebb6b3d4734b0ec6a5a47ae4ca411f79c3e1655faa04c910e6fe3cdf7" exitCode=0 Oct 02 11:39:39 crc kubenswrapper[4783]: I1002 11:39:39.197236 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l4qhd" event={"ID":"8f43840a-f4c5-42df-a160-b931a2348c7a","Type":"ContainerDied","Data":"434cf08ebb6b3d4734b0ec6a5a47ae4ca411f79c3e1655faa04c910e6fe3cdf7"} Oct 02 11:39:39 crc kubenswrapper[4783]: I1002 11:39:39.197265 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l4qhd" event={"ID":"8f43840a-f4c5-42df-a160-b931a2348c7a","Type":"ContainerStarted","Data":"de0bb734e2d66cc075aa7b105de9aa2296f64c218116aa1e8527d7f0eb33c235"} Oct 02 11:39:40 crc kubenswrapper[4783]: I1002 11:39:40.207884 4783 generic.go:334] "Generic (PLEG): container finished" podID="4692d534-cd1c-4ee0-97ba-4edd45d26f51" containerID="2edaa754fe36c8794e84e2451d30523fa2f8cf9775b2b7349a2a48617369309b" exitCode=0 Oct 02 11:39:40 crc kubenswrapper[4783]: I1002 11:39:40.207945 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wfw7q" event={"ID":"4692d534-cd1c-4ee0-97ba-4edd45d26f51","Type":"ContainerDied","Data":"2edaa754fe36c8794e84e2451d30523fa2f8cf9775b2b7349a2a48617369309b"} Oct 02 11:39:42 crc kubenswrapper[4783]: I1002 11:39:42.227597 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dprk2" event={"ID":"2b0fc9d5-91fa-465e-8cf1-107248e61ea1","Type":"ContainerStarted","Data":"f0761fc08573198c8c5f174ca9d12508144a900bc53d0d20760de7dbfe97df92"} Oct 02 11:39:42 crc kubenswrapper[4783]: I1002 11:39:42.229694 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wfw7q" event={"ID":"4692d534-cd1c-4ee0-97ba-4edd45d26f51","Type":"ContainerStarted","Data":"942ba5284a6b7cbb61e2b7d1873760c4cb473aa1234fdcac3db2c59faf1b3223"} Oct 02 11:39:42 crc kubenswrapper[4783]: I1002 11:39:42.231940 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l4qhd" event={"ID":"8f43840a-f4c5-42df-a160-b931a2348c7a","Type":"ContainerStarted","Data":"5c95031ba712ed9a33a7a1896dd74afd4ebb561a3936d1ab2240013487ed6c9c"} Oct 02 11:39:42 crc kubenswrapper[4783]: I1002 11:39:42.298143 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-wfw7q" podStartSLOduration=3.4337630519999998 podStartE2EDuration="8.298124659s" podCreationTimestamp="2025-10-02 11:39:34 +0000 UTC" firstStartedPulling="2025-10-02 11:39:36.138522832 +0000 UTC m=+2809.454717093" lastFinishedPulling="2025-10-02 11:39:41.002884429 +0000 UTC m=+2814.319078700" observedRunningTime="2025-10-02 11:39:42.29284561 +0000 UTC m=+2815.609039871" watchObservedRunningTime="2025-10-02 11:39:42.298124659 +0000 UTC m=+2815.614318920" Oct 02 11:39:43 crc kubenswrapper[4783]: I1002 11:39:43.241796 4783 generic.go:334] "Generic (PLEG): container finished" podID="8f43840a-f4c5-42df-a160-b931a2348c7a" containerID="5c95031ba712ed9a33a7a1896dd74afd4ebb561a3936d1ab2240013487ed6c9c" exitCode=0 Oct 02 11:39:43 crc kubenswrapper[4783]: I1002 
11:39:43.242505 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l4qhd" event={"ID":"8f43840a-f4c5-42df-a160-b931a2348c7a","Type":"ContainerDied","Data":"5c95031ba712ed9a33a7a1896dd74afd4ebb561a3936d1ab2240013487ed6c9c"} Oct 02 11:39:44 crc kubenswrapper[4783]: I1002 11:39:44.252855 4783 generic.go:334] "Generic (PLEG): container finished" podID="2b0fc9d5-91fa-465e-8cf1-107248e61ea1" containerID="f0761fc08573198c8c5f174ca9d12508144a900bc53d0d20760de7dbfe97df92" exitCode=0 Oct 02 11:39:44 crc kubenswrapper[4783]: I1002 11:39:44.252918 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dprk2" event={"ID":"2b0fc9d5-91fa-465e-8cf1-107248e61ea1","Type":"ContainerDied","Data":"f0761fc08573198c8c5f174ca9d12508144a900bc53d0d20760de7dbfe97df92"} Oct 02 11:39:44 crc kubenswrapper[4783]: I1002 11:39:44.256385 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l4qhd" event={"ID":"8f43840a-f4c5-42df-a160-b931a2348c7a","Type":"ContainerStarted","Data":"26c507dbbdf4e1d3308135815f4feb66aac07d2433008421bb83c7987fb3b975"} Oct 02 11:39:44 crc kubenswrapper[4783]: I1002 11:39:44.260086 4783 generic.go:334] "Generic (PLEG): container finished" podID="29a87f3e-4167-42e2-95fa-91cd28df9e05" containerID="a967aacf203827aabb43e6ed425722d484446139bf32dce25009f77f6bc54bab" exitCode=0 Oct 02 11:39:44 crc kubenswrapper[4783]: I1002 11:39:44.260131 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j7k4q" event={"ID":"29a87f3e-4167-42e2-95fa-91cd28df9e05","Type":"ContainerDied","Data":"a967aacf203827aabb43e6ed425722d484446139bf32dce25009f77f6bc54bab"} Oct 02 11:39:44 crc kubenswrapper[4783]: I1002 11:39:44.322656 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-l4qhd" podStartSLOduration=2.8495964430000003 podStartE2EDuration="7.32263435s" podCreationTimestamp="2025-10-02 11:39:37 +0000 UTC" firstStartedPulling="2025-10-02 11:39:39.199270931 +0000 UTC m=+2812.515465192" lastFinishedPulling="2025-10-02 11:39:43.672308838 +0000 UTC m=+2816.988503099" observedRunningTime="2025-10-02 11:39:44.320546805 +0000 UTC m=+2817.636741086" watchObservedRunningTime="2025-10-02 11:39:44.32263435 +0000 UTC m=+2817.638828611" Oct 02 11:39:45 crc kubenswrapper[4783]: I1002 11:39:45.040135 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-wfw7q" Oct 02 11:39:45 crc kubenswrapper[4783]: I1002 11:39:45.040446 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-wfw7q" Oct 02 11:39:45 crc kubenswrapper[4783]: I1002 11:39:45.270968 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dprk2" event={"ID":"2b0fc9d5-91fa-465e-8cf1-107248e61ea1","Type":"ContainerStarted","Data":"fa243f19aa1e2c165d56ef7007bb6e4cc03af0abd294c32a83486fef270818ec"} Oct 02 11:39:45 crc kubenswrapper[4783]: I1002 11:39:45.273667 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j7k4q" event={"ID":"29a87f3e-4167-42e2-95fa-91cd28df9e05","Type":"ContainerStarted","Data":"d59a49e47c7cde980841f599ad17b37bcd9bee352825004c2742c870adf5403e"} Oct 02 11:39:45 crc kubenswrapper[4783]: I1002 11:39:45.298829 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-marketplace/certified-operators-dprk2" podStartSLOduration=2.674578252 podStartE2EDuration="8.29880918s" podCreationTimestamp="2025-10-02 11:39:37 +0000 UTC" firstStartedPulling="2025-10-02 11:39:39.199275421 +0000 UTC m=+2812.515469682" lastFinishedPulling="2025-10-02 11:39:44.823506339 +0000 UTC m=+2818.139700610" observedRunningTime="2025-10-02 11:39:45.293969722 +0000 UTC m=+2818.610163983" watchObservedRunningTime="2025-10-02 11:39:45.29880918 +0000 UTC m=+2818.615003441" Oct 02 11:39:45 crc kubenswrapper[4783]: I1002 11:39:45.314552 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-j7k4q" podStartSLOduration=2.582551087 podStartE2EDuration="11.314533766s" podCreationTimestamp="2025-10-02 11:39:34 +0000 UTC" firstStartedPulling="2025-10-02 11:39:36.140155425 +0000 UTC m=+2809.456349686" lastFinishedPulling="2025-10-02 11:39:44.872138104 +0000 UTC m=+2818.188332365" observedRunningTime="2025-10-02 11:39:45.309927425 +0000 UTC m=+2818.626121686" watchObservedRunningTime="2025-10-02 11:39:45.314533766 +0000 UTC m=+2818.630728027" Oct 02 11:39:46 crc kubenswrapper[4783]: I1002 11:39:46.092779 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-wfw7q" podUID="4692d534-cd1c-4ee0-97ba-4edd45d26f51" containerName="registry-server" probeResult="failure" output=< Oct 02 11:39:46 crc kubenswrapper[4783]: timeout: failed to connect service ":50051" within 1s Oct 02 11:39:46 crc kubenswrapper[4783]: > Oct 02 11:39:47 crc kubenswrapper[4783]: I1002 11:39:47.399521 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-dprk2" Oct 02 11:39:47 crc kubenswrapper[4783]: I1002 11:39:47.399806 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-dprk2" Oct 02 11:39:48 crc kubenswrapper[4783]: I1002 11:39:48.037928 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-l4qhd" Oct 02 11:39:48 crc kubenswrapper[4783]: I1002 11:39:48.037977 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-l4qhd" Oct 02 11:39:48 crc kubenswrapper[4783]: I1002 11:39:48.458853 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-dprk2" podUID="2b0fc9d5-91fa-465e-8cf1-107248e61ea1" containerName="registry-server" probeResult="failure" output=< Oct 02 11:39:48 crc kubenswrapper[4783]: timeout: failed to connect service ":50051" within 1s Oct 02 11:39:48 crc kubenswrapper[4783]: > Oct 02 11:39:49 crc kubenswrapper[4783]: I1002 11:39:49.082577 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-l4qhd" podUID="8f43840a-f4c5-42df-a160-b931a2348c7a" containerName="registry-server" probeResult="failure" output=< Oct 02 11:39:49 crc kubenswrapper[4783]: timeout: failed to connect service ":50051" within 1s Oct 02 11:39:49 crc kubenswrapper[4783]: > Oct 02 11:39:54 crc kubenswrapper[4783]: I1002 11:39:54.795590 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-j7k4q" Oct 02 11:39:54 crc kubenswrapper[4783]: I1002 11:39:54.796849 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-j7k4q" Oct 02 11:39:55 crc 
kubenswrapper[4783]: I1002 11:39:55.851891 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-j7k4q" podUID="29a87f3e-4167-42e2-95fa-91cd28df9e05" containerName="registry-server" probeResult="failure" output=< Oct 02 11:39:55 crc kubenswrapper[4783]: timeout: failed to connect service ":50051" within 1s Oct 02 11:39:55 crc kubenswrapper[4783]: > Oct 02 11:39:56 crc kubenswrapper[4783]: I1002 11:39:56.085301 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-wfw7q" podUID="4692d534-cd1c-4ee0-97ba-4edd45d26f51" containerName="registry-server" probeResult="failure" output=< Oct 02 11:39:56 crc kubenswrapper[4783]: timeout: failed to connect service ":50051" within 1s Oct 02 11:39:56 crc kubenswrapper[4783]: > Oct 02 11:39:57 crc kubenswrapper[4783]: I1002 11:39:57.446164 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-dprk2" Oct 02 11:39:57 crc kubenswrapper[4783]: I1002 11:39:57.496513 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-dprk2" Oct 02 11:39:57 crc kubenswrapper[4783]: I1002 11:39:57.678493 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dprk2"] Oct 02 11:39:58 crc kubenswrapper[4783]: I1002 11:39:58.097366 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-l4qhd" Oct 02 11:39:58 crc kubenswrapper[4783]: I1002 11:39:58.166778 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-l4qhd" Oct 02 11:39:59 crc kubenswrapper[4783]: I1002 11:39:59.403744 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-dprk2" podUID="2b0fc9d5-91fa-465e-8cf1-107248e61ea1" containerName="registry-server" containerID="cri-o://fa243f19aa1e2c165d56ef7007bb6e4cc03af0abd294c32a83486fef270818ec" gracePeriod=2 Oct 02 11:39:59 crc kubenswrapper[4783]: I1002 11:39:59.870201 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-dprk2" Oct 02 11:39:59 crc kubenswrapper[4783]: I1002 11:39:59.949151 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2b0fc9d5-91fa-465e-8cf1-107248e61ea1-catalog-content\") pod \"2b0fc9d5-91fa-465e-8cf1-107248e61ea1\" (UID: \"2b0fc9d5-91fa-465e-8cf1-107248e61ea1\") " Oct 02 11:39:59 crc kubenswrapper[4783]: I1002 11:39:59.949265 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2b0fc9d5-91fa-465e-8cf1-107248e61ea1-utilities\") pod \"2b0fc9d5-91fa-465e-8cf1-107248e61ea1\" (UID: \"2b0fc9d5-91fa-465e-8cf1-107248e61ea1\") " Oct 02 11:39:59 crc kubenswrapper[4783]: I1002 11:39:59.949468 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kxfzh\" (UniqueName: \"kubernetes.io/projected/2b0fc9d5-91fa-465e-8cf1-107248e61ea1-kube-api-access-kxfzh\") pod \"2b0fc9d5-91fa-465e-8cf1-107248e61ea1\" (UID: \"2b0fc9d5-91fa-465e-8cf1-107248e61ea1\") " Oct 02 11:39:59 crc kubenswrapper[4783]: I1002 11:39:59.950078 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2b0fc9d5-91fa-465e-8cf1-107248e61ea1-utilities" (OuterVolumeSpecName: "utilities") pod "2b0fc9d5-91fa-465e-8cf1-107248e61ea1" (UID: "2b0fc9d5-91fa-465e-8cf1-107248e61ea1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:39:59 crc kubenswrapper[4783]: I1002 11:39:59.954916 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2b0fc9d5-91fa-465e-8cf1-107248e61ea1-kube-api-access-kxfzh" (OuterVolumeSpecName: "kube-api-access-kxfzh") pod "2b0fc9d5-91fa-465e-8cf1-107248e61ea1" (UID: "2b0fc9d5-91fa-465e-8cf1-107248e61ea1"). InnerVolumeSpecName "kube-api-access-kxfzh". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:39:59 crc kubenswrapper[4783]: I1002 11:39:59.993178 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2b0fc9d5-91fa-465e-8cf1-107248e61ea1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2b0fc9d5-91fa-465e-8cf1-107248e61ea1" (UID: "2b0fc9d5-91fa-465e-8cf1-107248e61ea1"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:40:00 crc kubenswrapper[4783]: I1002 11:40:00.051712 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kxfzh\" (UniqueName: \"kubernetes.io/projected/2b0fc9d5-91fa-465e-8cf1-107248e61ea1-kube-api-access-kxfzh\") on node \"crc\" DevicePath \"\"" Oct 02 11:40:00 crc kubenswrapper[4783]: I1002 11:40:00.051760 4783 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2b0fc9d5-91fa-465e-8cf1-107248e61ea1-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 02 11:40:00 crc kubenswrapper[4783]: I1002 11:40:00.051773 4783 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2b0fc9d5-91fa-465e-8cf1-107248e61ea1-utilities\") on node \"crc\" DevicePath \"\"" Oct 02 11:40:00 crc kubenswrapper[4783]: I1002 11:40:00.414312 4783 generic.go:334] "Generic (PLEG): container finished" podID="2b0fc9d5-91fa-465e-8cf1-107248e61ea1" containerID="fa243f19aa1e2c165d56ef7007bb6e4cc03af0abd294c32a83486fef270818ec" exitCode=0 Oct 02 11:40:00 crc kubenswrapper[4783]: I1002 11:40:00.414350 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dprk2" event={"ID":"2b0fc9d5-91fa-465e-8cf1-107248e61ea1","Type":"ContainerDied","Data":"fa243f19aa1e2c165d56ef7007bb6e4cc03af0abd294c32a83486fef270818ec"} Oct 02 11:40:00 crc kubenswrapper[4783]: I1002 11:40:00.414382 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-dprk2" event={"ID":"2b0fc9d5-91fa-465e-8cf1-107248e61ea1","Type":"ContainerDied","Data":"3ad238e1b57169894ba6643479c3b6f25d01c2151c7c7e4afd766a07c709f09f"} Oct 02 11:40:00 crc kubenswrapper[4783]: I1002 11:40:00.414397 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-dprk2" Oct 02 11:40:00 crc kubenswrapper[4783]: I1002 11:40:00.414469 4783 scope.go:117] "RemoveContainer" containerID="fa243f19aa1e2c165d56ef7007bb6e4cc03af0abd294c32a83486fef270818ec" Oct 02 11:40:00 crc kubenswrapper[4783]: I1002 11:40:00.444312 4783 scope.go:117] "RemoveContainer" containerID="f0761fc08573198c8c5f174ca9d12508144a900bc53d0d20760de7dbfe97df92" Oct 02 11:40:00 crc kubenswrapper[4783]: I1002 11:40:00.449475 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-dprk2"] Oct 02 11:40:00 crc kubenswrapper[4783]: I1002 11:40:00.474470 4783 scope.go:117] "RemoveContainer" containerID="5c18b98f70bbfffe030f5c17af145456795a54fce3fbf28fa8e046b3cadaecd0" Oct 02 11:40:00 crc kubenswrapper[4783]: I1002 11:40:00.480265 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-dprk2"] Oct 02 11:40:00 crc kubenswrapper[4783]: I1002 11:40:00.490775 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-l4qhd"] Oct 02 11:40:00 crc kubenswrapper[4783]: I1002 11:40:00.491050 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-l4qhd" podUID="8f43840a-f4c5-42df-a160-b931a2348c7a" containerName="registry-server" containerID="cri-o://26c507dbbdf4e1d3308135815f4feb66aac07d2433008421bb83c7987fb3b975" gracePeriod=2 Oct 02 11:40:00 crc kubenswrapper[4783]: I1002 11:40:00.533636 4783 scope.go:117] "RemoveContainer" containerID="fa243f19aa1e2c165d56ef7007bb6e4cc03af0abd294c32a83486fef270818ec" Oct 02 11:40:00 crc kubenswrapper[4783]: E1002 11:40:00.534279 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fa243f19aa1e2c165d56ef7007bb6e4cc03af0abd294c32a83486fef270818ec\": container with ID starting with fa243f19aa1e2c165d56ef7007bb6e4cc03af0abd294c32a83486fef270818ec not found: ID does not exist" containerID="fa243f19aa1e2c165d56ef7007bb6e4cc03af0abd294c32a83486fef270818ec" Oct 02 11:40:00 crc kubenswrapper[4783]: I1002 11:40:00.534315 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fa243f19aa1e2c165d56ef7007bb6e4cc03af0abd294c32a83486fef270818ec"} err="failed to get container status \"fa243f19aa1e2c165d56ef7007bb6e4cc03af0abd294c32a83486fef270818ec\": rpc error: code = NotFound desc = could not find container \"fa243f19aa1e2c165d56ef7007bb6e4cc03af0abd294c32a83486fef270818ec\": container with ID starting with fa243f19aa1e2c165d56ef7007bb6e4cc03af0abd294c32a83486fef270818ec not found: ID does not exist" Oct 02 11:40:00 crc kubenswrapper[4783]: I1002 11:40:00.534341 4783 scope.go:117] "RemoveContainer" containerID="f0761fc08573198c8c5f174ca9d12508144a900bc53d0d20760de7dbfe97df92" Oct 02 11:40:00 crc kubenswrapper[4783]: E1002 11:40:00.534617 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f0761fc08573198c8c5f174ca9d12508144a900bc53d0d20760de7dbfe97df92\": container with ID starting with f0761fc08573198c8c5f174ca9d12508144a900bc53d0d20760de7dbfe97df92 not found: ID does not exist" containerID="f0761fc08573198c8c5f174ca9d12508144a900bc53d0d20760de7dbfe97df92" Oct 02 11:40:00 crc kubenswrapper[4783]: I1002 11:40:00.534637 4783 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"f0761fc08573198c8c5f174ca9d12508144a900bc53d0d20760de7dbfe97df92"} err="failed to get container status \"f0761fc08573198c8c5f174ca9d12508144a900bc53d0d20760de7dbfe97df92\": rpc error: code = NotFound desc = could not find container \"f0761fc08573198c8c5f174ca9d12508144a900bc53d0d20760de7dbfe97df92\": container with ID starting with f0761fc08573198c8c5f174ca9d12508144a900bc53d0d20760de7dbfe97df92 not found: ID does not exist" Oct 02 11:40:00 crc kubenswrapper[4783]: I1002 11:40:00.534652 4783 scope.go:117] "RemoveContainer" containerID="5c18b98f70bbfffe030f5c17af145456795a54fce3fbf28fa8e046b3cadaecd0" Oct 02 11:40:00 crc kubenswrapper[4783]: E1002 11:40:00.535002 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5c18b98f70bbfffe030f5c17af145456795a54fce3fbf28fa8e046b3cadaecd0\": container with ID starting with 5c18b98f70bbfffe030f5c17af145456795a54fce3fbf28fa8e046b3cadaecd0 not found: ID does not exist" containerID="5c18b98f70bbfffe030f5c17af145456795a54fce3fbf28fa8e046b3cadaecd0" Oct 02 11:40:00 crc kubenswrapper[4783]: I1002 11:40:00.535032 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5c18b98f70bbfffe030f5c17af145456795a54fce3fbf28fa8e046b3cadaecd0"} err="failed to get container status \"5c18b98f70bbfffe030f5c17af145456795a54fce3fbf28fa8e046b3cadaecd0\": rpc error: code = NotFound desc = could not find container \"5c18b98f70bbfffe030f5c17af145456795a54fce3fbf28fa8e046b3cadaecd0\": container with ID starting with 5c18b98f70bbfffe030f5c17af145456795a54fce3fbf28fa8e046b3cadaecd0 not found: ID does not exist" Oct 02 11:40:00 crc kubenswrapper[4783]: I1002 11:40:00.928997 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l4qhd" Oct 02 11:40:00 crc kubenswrapper[4783]: I1002 11:40:00.968643 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l7rv7\" (UniqueName: \"kubernetes.io/projected/8f43840a-f4c5-42df-a160-b931a2348c7a-kube-api-access-l7rv7\") pod \"8f43840a-f4c5-42df-a160-b931a2348c7a\" (UID: \"8f43840a-f4c5-42df-a160-b931a2348c7a\") " Oct 02 11:40:00 crc kubenswrapper[4783]: I1002 11:40:00.968774 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8f43840a-f4c5-42df-a160-b931a2348c7a-catalog-content\") pod \"8f43840a-f4c5-42df-a160-b931a2348c7a\" (UID: \"8f43840a-f4c5-42df-a160-b931a2348c7a\") " Oct 02 11:40:00 crc kubenswrapper[4783]: I1002 11:40:00.968867 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f43840a-f4c5-42df-a160-b931a2348c7a-utilities\") pod \"8f43840a-f4c5-42df-a160-b931a2348c7a\" (UID: \"8f43840a-f4c5-42df-a160-b931a2348c7a\") " Oct 02 11:40:00 crc kubenswrapper[4783]: I1002 11:40:00.970001 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f43840a-f4c5-42df-a160-b931a2348c7a-utilities" (OuterVolumeSpecName: "utilities") pod "8f43840a-f4c5-42df-a160-b931a2348c7a" (UID: "8f43840a-f4c5-42df-a160-b931a2348c7a"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:40:00 crc kubenswrapper[4783]: I1002 11:40:00.976699 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f43840a-f4c5-42df-a160-b931a2348c7a-kube-api-access-l7rv7" (OuterVolumeSpecName: "kube-api-access-l7rv7") pod "8f43840a-f4c5-42df-a160-b931a2348c7a" (UID: "8f43840a-f4c5-42df-a160-b931a2348c7a"). InnerVolumeSpecName "kube-api-access-l7rv7". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:40:00 crc kubenswrapper[4783]: I1002 11:40:00.982994 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f43840a-f4c5-42df-a160-b931a2348c7a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8f43840a-f4c5-42df-a160-b931a2348c7a" (UID: "8f43840a-f4c5-42df-a160-b931a2348c7a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:40:01 crc kubenswrapper[4783]: I1002 11:40:01.070744 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l7rv7\" (UniqueName: \"kubernetes.io/projected/8f43840a-f4c5-42df-a160-b931a2348c7a-kube-api-access-l7rv7\") on node \"crc\" DevicePath \"\"" Oct 02 11:40:01 crc kubenswrapper[4783]: I1002 11:40:01.070787 4783 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8f43840a-f4c5-42df-a160-b931a2348c7a-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 02 11:40:01 crc kubenswrapper[4783]: I1002 11:40:01.070799 4783 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8f43840a-f4c5-42df-a160-b931a2348c7a-utilities\") on node \"crc\" DevicePath \"\"" Oct 02 11:40:01 crc kubenswrapper[4783]: I1002 11:40:01.441496 4783 generic.go:334] "Generic (PLEG): container finished" podID="8f43840a-f4c5-42df-a160-b931a2348c7a" containerID="26c507dbbdf4e1d3308135815f4feb66aac07d2433008421bb83c7987fb3b975" exitCode=0 Oct 02 11:40:01 crc kubenswrapper[4783]: I1002 11:40:01.441537 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l4qhd" event={"ID":"8f43840a-f4c5-42df-a160-b931a2348c7a","Type":"ContainerDied","Data":"26c507dbbdf4e1d3308135815f4feb66aac07d2433008421bb83c7987fb3b975"} Oct 02 11:40:01 crc kubenswrapper[4783]: I1002 11:40:01.441563 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-l4qhd" event={"ID":"8f43840a-f4c5-42df-a160-b931a2348c7a","Type":"ContainerDied","Data":"de0bb734e2d66cc075aa7b105de9aa2296f64c218116aa1e8527d7f0eb33c235"} Oct 02 11:40:01 crc kubenswrapper[4783]: I1002 11:40:01.441581 4783 scope.go:117] "RemoveContainer" containerID="26c507dbbdf4e1d3308135815f4feb66aac07d2433008421bb83c7987fb3b975" Oct 02 11:40:01 crc kubenswrapper[4783]: I1002 11:40:01.441627 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-l4qhd" Oct 02 11:40:01 crc kubenswrapper[4783]: I1002 11:40:01.479626 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-l4qhd"] Oct 02 11:40:01 crc kubenswrapper[4783]: I1002 11:40:01.479670 4783 scope.go:117] "RemoveContainer" containerID="5c95031ba712ed9a33a7a1896dd74afd4ebb561a3936d1ab2240013487ed6c9c" Oct 02 11:40:01 crc kubenswrapper[4783]: I1002 11:40:01.489662 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-l4qhd"] Oct 02 11:40:01 crc kubenswrapper[4783]: I1002 11:40:01.498887 4783 scope.go:117] "RemoveContainer" containerID="434cf08ebb6b3d4734b0ec6a5a47ae4ca411f79c3e1655faa04c910e6fe3cdf7" Oct 02 11:40:01 crc kubenswrapper[4783]: I1002 11:40:01.524174 4783 scope.go:117] "RemoveContainer" containerID="26c507dbbdf4e1d3308135815f4feb66aac07d2433008421bb83c7987fb3b975" Oct 02 11:40:01 crc kubenswrapper[4783]: E1002 11:40:01.524549 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"26c507dbbdf4e1d3308135815f4feb66aac07d2433008421bb83c7987fb3b975\": container with ID starting with 26c507dbbdf4e1d3308135815f4feb66aac07d2433008421bb83c7987fb3b975 not found: ID does not exist" containerID="26c507dbbdf4e1d3308135815f4feb66aac07d2433008421bb83c7987fb3b975" Oct 02 11:40:01 crc kubenswrapper[4783]: I1002 11:40:01.524674 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"26c507dbbdf4e1d3308135815f4feb66aac07d2433008421bb83c7987fb3b975"} err="failed to get container status \"26c507dbbdf4e1d3308135815f4feb66aac07d2433008421bb83c7987fb3b975\": rpc error: code = NotFound desc = could not find container \"26c507dbbdf4e1d3308135815f4feb66aac07d2433008421bb83c7987fb3b975\": container with ID starting with 26c507dbbdf4e1d3308135815f4feb66aac07d2433008421bb83c7987fb3b975 not found: ID does not exist" Oct 02 11:40:01 crc kubenswrapper[4783]: I1002 11:40:01.524770 4783 scope.go:117] "RemoveContainer" containerID="5c95031ba712ed9a33a7a1896dd74afd4ebb561a3936d1ab2240013487ed6c9c" Oct 02 11:40:01 crc kubenswrapper[4783]: E1002 11:40:01.525023 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5c95031ba712ed9a33a7a1896dd74afd4ebb561a3936d1ab2240013487ed6c9c\": container with ID starting with 5c95031ba712ed9a33a7a1896dd74afd4ebb561a3936d1ab2240013487ed6c9c not found: ID does not exist" containerID="5c95031ba712ed9a33a7a1896dd74afd4ebb561a3936d1ab2240013487ed6c9c" Oct 02 11:40:01 crc kubenswrapper[4783]: I1002 11:40:01.525048 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5c95031ba712ed9a33a7a1896dd74afd4ebb561a3936d1ab2240013487ed6c9c"} err="failed to get container status \"5c95031ba712ed9a33a7a1896dd74afd4ebb561a3936d1ab2240013487ed6c9c\": rpc error: code = NotFound desc = could not find container \"5c95031ba712ed9a33a7a1896dd74afd4ebb561a3936d1ab2240013487ed6c9c\": container with ID starting with 5c95031ba712ed9a33a7a1896dd74afd4ebb561a3936d1ab2240013487ed6c9c not found: ID does not exist" Oct 02 11:40:01 crc kubenswrapper[4783]: I1002 11:40:01.525063 4783 scope.go:117] "RemoveContainer" containerID="434cf08ebb6b3d4734b0ec6a5a47ae4ca411f79c3e1655faa04c910e6fe3cdf7" Oct 02 11:40:01 crc kubenswrapper[4783]: E1002 11:40:01.525300 4783 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"434cf08ebb6b3d4734b0ec6a5a47ae4ca411f79c3e1655faa04c910e6fe3cdf7\": container with ID starting with 434cf08ebb6b3d4734b0ec6a5a47ae4ca411f79c3e1655faa04c910e6fe3cdf7 not found: ID does not exist" containerID="434cf08ebb6b3d4734b0ec6a5a47ae4ca411f79c3e1655faa04c910e6fe3cdf7" Oct 02 11:40:01 crc kubenswrapper[4783]: I1002 11:40:01.525321 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"434cf08ebb6b3d4734b0ec6a5a47ae4ca411f79c3e1655faa04c910e6fe3cdf7"} err="failed to get container status \"434cf08ebb6b3d4734b0ec6a5a47ae4ca411f79c3e1655faa04c910e6fe3cdf7\": rpc error: code = NotFound desc = could not find container \"434cf08ebb6b3d4734b0ec6a5a47ae4ca411f79c3e1655faa04c910e6fe3cdf7\": container with ID starting with 434cf08ebb6b3d4734b0ec6a5a47ae4ca411f79c3e1655faa04c910e6fe3cdf7 not found: ID does not exist" Oct 02 11:40:01 crc kubenswrapper[4783]: I1002 11:40:01.556541 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2b0fc9d5-91fa-465e-8cf1-107248e61ea1" path="/var/lib/kubelet/pods/2b0fc9d5-91fa-465e-8cf1-107248e61ea1/volumes" Oct 02 11:40:01 crc kubenswrapper[4783]: I1002 11:40:01.557461 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f43840a-f4c5-42df-a160-b931a2348c7a" path="/var/lib/kubelet/pods/8f43840a-f4c5-42df-a160-b931a2348c7a/volumes" Oct 02 11:40:05 crc kubenswrapper[4783]: I1002 11:40:05.086288 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-wfw7q" Oct 02 11:40:05 crc kubenswrapper[4783]: I1002 11:40:05.135625 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-wfw7q" Oct 02 11:40:05 crc kubenswrapper[4783]: I1002 11:40:05.841855 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-j7k4q" podUID="29a87f3e-4167-42e2-95fa-91cd28df9e05" containerName="registry-server" probeResult="failure" output=< Oct 02 11:40:05 crc kubenswrapper[4783]: timeout: failed to connect service ":50051" within 1s Oct 02 11:40:05 crc kubenswrapper[4783]: > Oct 02 11:40:06 crc kubenswrapper[4783]: I1002 11:40:06.282732 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-wfw7q"] Oct 02 11:40:06 crc kubenswrapper[4783]: I1002 11:40:06.485397 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-wfw7q" podUID="4692d534-cd1c-4ee0-97ba-4edd45d26f51" containerName="registry-server" containerID="cri-o://942ba5284a6b7cbb61e2b7d1873760c4cb473aa1234fdcac3db2c59faf1b3223" gracePeriod=2 Oct 02 11:40:06 crc kubenswrapper[4783]: I1002 11:40:06.894570 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-wfw7q" Oct 02 11:40:06 crc kubenswrapper[4783]: I1002 11:40:06.991720 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4692d534-cd1c-4ee0-97ba-4edd45d26f51-utilities\") pod \"4692d534-cd1c-4ee0-97ba-4edd45d26f51\" (UID: \"4692d534-cd1c-4ee0-97ba-4edd45d26f51\") " Oct 02 11:40:06 crc kubenswrapper[4783]: I1002 11:40:06.991762 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4692d534-cd1c-4ee0-97ba-4edd45d26f51-catalog-content\") pod \"4692d534-cd1c-4ee0-97ba-4edd45d26f51\" (UID: \"4692d534-cd1c-4ee0-97ba-4edd45d26f51\") " Oct 02 11:40:06 crc kubenswrapper[4783]: I1002 11:40:06.991991 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dzhg7\" (UniqueName: \"kubernetes.io/projected/4692d534-cd1c-4ee0-97ba-4edd45d26f51-kube-api-access-dzhg7\") pod \"4692d534-cd1c-4ee0-97ba-4edd45d26f51\" (UID: \"4692d534-cd1c-4ee0-97ba-4edd45d26f51\") " Oct 02 11:40:06 crc kubenswrapper[4783]: I1002 11:40:06.992373 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4692d534-cd1c-4ee0-97ba-4edd45d26f51-utilities" (OuterVolumeSpecName: "utilities") pod "4692d534-cd1c-4ee0-97ba-4edd45d26f51" (UID: "4692d534-cd1c-4ee0-97ba-4edd45d26f51"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:40:06 crc kubenswrapper[4783]: I1002 11:40:06.992623 4783 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4692d534-cd1c-4ee0-97ba-4edd45d26f51-utilities\") on node \"crc\" DevicePath \"\"" Oct 02 11:40:07 crc kubenswrapper[4783]: I1002 11:40:07.000486 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4692d534-cd1c-4ee0-97ba-4edd45d26f51-kube-api-access-dzhg7" (OuterVolumeSpecName: "kube-api-access-dzhg7") pod "4692d534-cd1c-4ee0-97ba-4edd45d26f51" (UID: "4692d534-cd1c-4ee0-97ba-4edd45d26f51"). InnerVolumeSpecName "kube-api-access-dzhg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:40:07 crc kubenswrapper[4783]: I1002 11:40:07.035979 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4692d534-cd1c-4ee0-97ba-4edd45d26f51-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4692d534-cd1c-4ee0-97ba-4edd45d26f51" (UID: "4692d534-cd1c-4ee0-97ba-4edd45d26f51"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:40:07 crc kubenswrapper[4783]: I1002 11:40:07.094357 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dzhg7\" (UniqueName: \"kubernetes.io/projected/4692d534-cd1c-4ee0-97ba-4edd45d26f51-kube-api-access-dzhg7\") on node \"crc\" DevicePath \"\"" Oct 02 11:40:07 crc kubenswrapper[4783]: I1002 11:40:07.094398 4783 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4692d534-cd1c-4ee0-97ba-4edd45d26f51-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 02 11:40:07 crc kubenswrapper[4783]: I1002 11:40:07.495274 4783 generic.go:334] "Generic (PLEG): container finished" podID="4692d534-cd1c-4ee0-97ba-4edd45d26f51" containerID="942ba5284a6b7cbb61e2b7d1873760c4cb473aa1234fdcac3db2c59faf1b3223" exitCode=0 Oct 02 11:40:07 crc kubenswrapper[4783]: I1002 11:40:07.495321 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wfw7q" event={"ID":"4692d534-cd1c-4ee0-97ba-4edd45d26f51","Type":"ContainerDied","Data":"942ba5284a6b7cbb61e2b7d1873760c4cb473aa1234fdcac3db2c59faf1b3223"} Oct 02 11:40:07 crc kubenswrapper[4783]: I1002 11:40:07.495648 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-wfw7q" event={"ID":"4692d534-cd1c-4ee0-97ba-4edd45d26f51","Type":"ContainerDied","Data":"46ccf0e62388bb3ce909959762a5652789ceb9039712c9fadcbf7e7cd683cbaa"} Oct 02 11:40:07 crc kubenswrapper[4783]: I1002 11:40:07.495666 4783 scope.go:117] "RemoveContainer" containerID="942ba5284a6b7cbb61e2b7d1873760c4cb473aa1234fdcac3db2c59faf1b3223" Oct 02 11:40:07 crc kubenswrapper[4783]: I1002 11:40:07.495338 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-wfw7q" Oct 02 11:40:07 crc kubenswrapper[4783]: I1002 11:40:07.515851 4783 scope.go:117] "RemoveContainer" containerID="2edaa754fe36c8794e84e2451d30523fa2f8cf9775b2b7349a2a48617369309b" Oct 02 11:40:07 crc kubenswrapper[4783]: I1002 11:40:07.533974 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-wfw7q"] Oct 02 11:40:07 crc kubenswrapper[4783]: I1002 11:40:07.544695 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-wfw7q"] Oct 02 11:40:07 crc kubenswrapper[4783]: I1002 11:40:07.573185 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4692d534-cd1c-4ee0-97ba-4edd45d26f51" path="/var/lib/kubelet/pods/4692d534-cd1c-4ee0-97ba-4edd45d26f51/volumes" Oct 02 11:40:07 crc kubenswrapper[4783]: I1002 11:40:07.593008 4783 scope.go:117] "RemoveContainer" containerID="478a4359b83913c538424eb4033f34fcb624fe4b2d656698e9a35c63618021e6" Oct 02 11:40:07 crc kubenswrapper[4783]: I1002 11:40:07.641567 4783 scope.go:117] "RemoveContainer" containerID="942ba5284a6b7cbb61e2b7d1873760c4cb473aa1234fdcac3db2c59faf1b3223" Oct 02 11:40:07 crc kubenswrapper[4783]: E1002 11:40:07.642056 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"942ba5284a6b7cbb61e2b7d1873760c4cb473aa1234fdcac3db2c59faf1b3223\": container with ID starting with 942ba5284a6b7cbb61e2b7d1873760c4cb473aa1234fdcac3db2c59faf1b3223 not found: ID does not exist" containerID="942ba5284a6b7cbb61e2b7d1873760c4cb473aa1234fdcac3db2c59faf1b3223" Oct 02 11:40:07 crc kubenswrapper[4783]: I1002 11:40:07.642088 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"942ba5284a6b7cbb61e2b7d1873760c4cb473aa1234fdcac3db2c59faf1b3223"} err="failed to get container status \"942ba5284a6b7cbb61e2b7d1873760c4cb473aa1234fdcac3db2c59faf1b3223\": rpc error: code = NotFound desc = could not find container \"942ba5284a6b7cbb61e2b7d1873760c4cb473aa1234fdcac3db2c59faf1b3223\": container with ID starting with 942ba5284a6b7cbb61e2b7d1873760c4cb473aa1234fdcac3db2c59faf1b3223 not found: ID does not exist" Oct 02 11:40:07 crc kubenswrapper[4783]: I1002 11:40:07.642109 4783 scope.go:117] "RemoveContainer" containerID="2edaa754fe36c8794e84e2451d30523fa2f8cf9775b2b7349a2a48617369309b" Oct 02 11:40:07 crc kubenswrapper[4783]: E1002 11:40:07.642477 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2edaa754fe36c8794e84e2451d30523fa2f8cf9775b2b7349a2a48617369309b\": container with ID starting with 2edaa754fe36c8794e84e2451d30523fa2f8cf9775b2b7349a2a48617369309b not found: ID does not exist" containerID="2edaa754fe36c8794e84e2451d30523fa2f8cf9775b2b7349a2a48617369309b" Oct 02 11:40:07 crc kubenswrapper[4783]: I1002 11:40:07.642503 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2edaa754fe36c8794e84e2451d30523fa2f8cf9775b2b7349a2a48617369309b"} err="failed to get container status \"2edaa754fe36c8794e84e2451d30523fa2f8cf9775b2b7349a2a48617369309b\": rpc error: code = NotFound desc = could not find container \"2edaa754fe36c8794e84e2451d30523fa2f8cf9775b2b7349a2a48617369309b\": container with ID starting with 2edaa754fe36c8794e84e2451d30523fa2f8cf9775b2b7349a2a48617369309b not found: ID does not exist" Oct 02 11:40:07 crc kubenswrapper[4783]: I1002 
11:40:07.642515 4783 scope.go:117] "RemoveContainer" containerID="478a4359b83913c538424eb4033f34fcb624fe4b2d656698e9a35c63618021e6" Oct 02 11:40:07 crc kubenswrapper[4783]: E1002 11:40:07.642915 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"478a4359b83913c538424eb4033f34fcb624fe4b2d656698e9a35c63618021e6\": container with ID starting with 478a4359b83913c538424eb4033f34fcb624fe4b2d656698e9a35c63618021e6 not found: ID does not exist" containerID="478a4359b83913c538424eb4033f34fcb624fe4b2d656698e9a35c63618021e6" Oct 02 11:40:07 crc kubenswrapper[4783]: I1002 11:40:07.642938 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"478a4359b83913c538424eb4033f34fcb624fe4b2d656698e9a35c63618021e6"} err="failed to get container status \"478a4359b83913c538424eb4033f34fcb624fe4b2d656698e9a35c63618021e6\": rpc error: code = NotFound desc = could not find container \"478a4359b83913c538424eb4033f34fcb624fe4b2d656698e9a35c63618021e6\": container with ID starting with 478a4359b83913c538424eb4033f34fcb624fe4b2d656698e9a35c63618021e6 not found: ID does not exist" Oct 02 11:40:14 crc kubenswrapper[4783]: I1002 11:40:14.842787 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-j7k4q" Oct 02 11:40:14 crc kubenswrapper[4783]: I1002 11:40:14.892057 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-j7k4q" Oct 02 11:40:15 crc kubenswrapper[4783]: I1002 11:40:15.083477 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-j7k4q"] Oct 02 11:40:16 crc kubenswrapper[4783]: I1002 11:40:16.573615 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-j7k4q" podUID="29a87f3e-4167-42e2-95fa-91cd28df9e05" containerName="registry-server" containerID="cri-o://d59a49e47c7cde980841f599ad17b37bcd9bee352825004c2742c870adf5403e" gracePeriod=2 Oct 02 11:40:16 crc kubenswrapper[4783]: I1002 11:40:16.975318 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-j7k4q" Oct 02 11:40:17 crc kubenswrapper[4783]: I1002 11:40:17.079854 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29a87f3e-4167-42e2-95fa-91cd28df9e05-catalog-content\") pod \"29a87f3e-4167-42e2-95fa-91cd28df9e05\" (UID: \"29a87f3e-4167-42e2-95fa-91cd28df9e05\") " Oct 02 11:40:17 crc kubenswrapper[4783]: I1002 11:40:17.080042 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29a87f3e-4167-42e2-95fa-91cd28df9e05-utilities\") pod \"29a87f3e-4167-42e2-95fa-91cd28df9e05\" (UID: \"29a87f3e-4167-42e2-95fa-91cd28df9e05\") " Oct 02 11:40:17 crc kubenswrapper[4783]: I1002 11:40:17.080105 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sjq7q\" (UniqueName: \"kubernetes.io/projected/29a87f3e-4167-42e2-95fa-91cd28df9e05-kube-api-access-sjq7q\") pod \"29a87f3e-4167-42e2-95fa-91cd28df9e05\" (UID: \"29a87f3e-4167-42e2-95fa-91cd28df9e05\") " Oct 02 11:40:17 crc kubenswrapper[4783]: I1002 11:40:17.080822 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/29a87f3e-4167-42e2-95fa-91cd28df9e05-utilities" (OuterVolumeSpecName: "utilities") pod "29a87f3e-4167-42e2-95fa-91cd28df9e05" (UID: "29a87f3e-4167-42e2-95fa-91cd28df9e05"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:40:17 crc kubenswrapper[4783]: I1002 11:40:17.081782 4783 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29a87f3e-4167-42e2-95fa-91cd28df9e05-utilities\") on node \"crc\" DevicePath \"\"" Oct 02 11:40:17 crc kubenswrapper[4783]: I1002 11:40:17.086328 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29a87f3e-4167-42e2-95fa-91cd28df9e05-kube-api-access-sjq7q" (OuterVolumeSpecName: "kube-api-access-sjq7q") pod "29a87f3e-4167-42e2-95fa-91cd28df9e05" (UID: "29a87f3e-4167-42e2-95fa-91cd28df9e05"). InnerVolumeSpecName "kube-api-access-sjq7q". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:40:17 crc kubenswrapper[4783]: I1002 11:40:17.165471 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/29a87f3e-4167-42e2-95fa-91cd28df9e05-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "29a87f3e-4167-42e2-95fa-91cd28df9e05" (UID: "29a87f3e-4167-42e2-95fa-91cd28df9e05"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:40:17 crc kubenswrapper[4783]: I1002 11:40:17.183255 4783 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29a87f3e-4167-42e2-95fa-91cd28df9e05-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 02 11:40:17 crc kubenswrapper[4783]: I1002 11:40:17.183285 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sjq7q\" (UniqueName: \"kubernetes.io/projected/29a87f3e-4167-42e2-95fa-91cd28df9e05-kube-api-access-sjq7q\") on node \"crc\" DevicePath \"\"" Oct 02 11:40:17 crc kubenswrapper[4783]: I1002 11:40:17.584850 4783 generic.go:334] "Generic (PLEG): container finished" podID="29a87f3e-4167-42e2-95fa-91cd28df9e05" containerID="d59a49e47c7cde980841f599ad17b37bcd9bee352825004c2742c870adf5403e" exitCode=0 Oct 02 11:40:17 crc kubenswrapper[4783]: I1002 11:40:17.585172 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j7k4q" event={"ID":"29a87f3e-4167-42e2-95fa-91cd28df9e05","Type":"ContainerDied","Data":"d59a49e47c7cde980841f599ad17b37bcd9bee352825004c2742c870adf5403e"} Oct 02 11:40:17 crc kubenswrapper[4783]: I1002 11:40:17.585160 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-j7k4q" Oct 02 11:40:17 crc kubenswrapper[4783]: I1002 11:40:17.585245 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j7k4q" event={"ID":"29a87f3e-4167-42e2-95fa-91cd28df9e05","Type":"ContainerDied","Data":"4895a8ef413a3e4dabc7239eb6de70eede7da60a5646e433651d1583b89f583d"} Oct 02 11:40:17 crc kubenswrapper[4783]: I1002 11:40:17.585266 4783 scope.go:117] "RemoveContainer" containerID="d59a49e47c7cde980841f599ad17b37bcd9bee352825004c2742c870adf5403e" Oct 02 11:40:17 crc kubenswrapper[4783]: I1002 11:40:17.610614 4783 scope.go:117] "RemoveContainer" containerID="a967aacf203827aabb43e6ed425722d484446139bf32dce25009f77f6bc54bab" Oct 02 11:40:17 crc kubenswrapper[4783]: I1002 11:40:17.615547 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-j7k4q"] Oct 02 11:40:17 crc kubenswrapper[4783]: I1002 11:40:17.624484 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-j7k4q"] Oct 02 11:40:17 crc kubenswrapper[4783]: I1002 11:40:17.630700 4783 scope.go:117] "RemoveContainer" containerID="5b88e9344dc4f5384f4e53043ca088ee7e5a8e4ddfc5990b457a2267209b2540" Oct 02 11:40:17 crc kubenswrapper[4783]: I1002 11:40:17.684659 4783 scope.go:117] "RemoveContainer" containerID="d59a49e47c7cde980841f599ad17b37bcd9bee352825004c2742c870adf5403e" Oct 02 11:40:17 crc kubenswrapper[4783]: E1002 11:40:17.685650 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d59a49e47c7cde980841f599ad17b37bcd9bee352825004c2742c870adf5403e\": container with ID starting with d59a49e47c7cde980841f599ad17b37bcd9bee352825004c2742c870adf5403e not found: ID does not exist" containerID="d59a49e47c7cde980841f599ad17b37bcd9bee352825004c2742c870adf5403e" Oct 02 11:40:17 crc kubenswrapper[4783]: I1002 11:40:17.685681 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d59a49e47c7cde980841f599ad17b37bcd9bee352825004c2742c870adf5403e"} err="failed to get container status \"d59a49e47c7cde980841f599ad17b37bcd9bee352825004c2742c870adf5403e\": 
rpc error: code = NotFound desc = could not find container \"d59a49e47c7cde980841f599ad17b37bcd9bee352825004c2742c870adf5403e\": container with ID starting with d59a49e47c7cde980841f599ad17b37bcd9bee352825004c2742c870adf5403e not found: ID does not exist" Oct 02 11:40:17 crc kubenswrapper[4783]: I1002 11:40:17.685703 4783 scope.go:117] "RemoveContainer" containerID="a967aacf203827aabb43e6ed425722d484446139bf32dce25009f77f6bc54bab" Oct 02 11:40:17 crc kubenswrapper[4783]: E1002 11:40:17.686124 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a967aacf203827aabb43e6ed425722d484446139bf32dce25009f77f6bc54bab\": container with ID starting with a967aacf203827aabb43e6ed425722d484446139bf32dce25009f77f6bc54bab not found: ID does not exist" containerID="a967aacf203827aabb43e6ed425722d484446139bf32dce25009f77f6bc54bab" Oct 02 11:40:17 crc kubenswrapper[4783]: I1002 11:40:17.686146 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a967aacf203827aabb43e6ed425722d484446139bf32dce25009f77f6bc54bab"} err="failed to get container status \"a967aacf203827aabb43e6ed425722d484446139bf32dce25009f77f6bc54bab\": rpc error: code = NotFound desc = could not find container \"a967aacf203827aabb43e6ed425722d484446139bf32dce25009f77f6bc54bab\": container with ID starting with a967aacf203827aabb43e6ed425722d484446139bf32dce25009f77f6bc54bab not found: ID does not exist" Oct 02 11:40:17 crc kubenswrapper[4783]: I1002 11:40:17.686159 4783 scope.go:117] "RemoveContainer" containerID="5b88e9344dc4f5384f4e53043ca088ee7e5a8e4ddfc5990b457a2267209b2540" Oct 02 11:40:17 crc kubenswrapper[4783]: E1002 11:40:17.686552 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5b88e9344dc4f5384f4e53043ca088ee7e5a8e4ddfc5990b457a2267209b2540\": container with ID starting with 5b88e9344dc4f5384f4e53043ca088ee7e5a8e4ddfc5990b457a2267209b2540 not found: ID does not exist" containerID="5b88e9344dc4f5384f4e53043ca088ee7e5a8e4ddfc5990b457a2267209b2540" Oct 02 11:40:17 crc kubenswrapper[4783]: I1002 11:40:17.686573 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5b88e9344dc4f5384f4e53043ca088ee7e5a8e4ddfc5990b457a2267209b2540"} err="failed to get container status \"5b88e9344dc4f5384f4e53043ca088ee7e5a8e4ddfc5990b457a2267209b2540\": rpc error: code = NotFound desc = could not find container \"5b88e9344dc4f5384f4e53043ca088ee7e5a8e4ddfc5990b457a2267209b2540\": container with ID starting with 5b88e9344dc4f5384f4e53043ca088ee7e5a8e4ddfc5990b457a2267209b2540 not found: ID does not exist" Oct 02 11:40:19 crc kubenswrapper[4783]: I1002 11:40:19.560160 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="29a87f3e-4167-42e2-95fa-91cd28df9e05" path="/var/lib/kubelet/pods/29a87f3e-4167-42e2-95fa-91cd28df9e05/volumes" Oct 02 11:40:51 crc kubenswrapper[4783]: I1002 11:40:51.513847 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 02 11:40:51 crc kubenswrapper[4783]: I1002 11:40:51.514865 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" 
podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 02 11:41:21 crc kubenswrapper[4783]: I1002 11:41:21.513934 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 02 11:41:21 crc kubenswrapper[4783]: I1002 11:41:21.515176 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 02 11:41:51 crc kubenswrapper[4783]: I1002 11:41:51.513562 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 02 11:41:51 crc kubenswrapper[4783]: I1002 11:41:51.515001 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 02 11:41:51 crc kubenswrapper[4783]: I1002 11:41:51.515088 4783 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" Oct 02 11:41:51 crc kubenswrapper[4783]: I1002 11:41:51.516031 4783 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"16e46e78a08cfb5a1610f0efb729bfc7f610a79babf01966444bacf916b3acb9"} pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 02 11:41:51 crc kubenswrapper[4783]: I1002 11:41:51.516118 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" containerID="cri-o://16e46e78a08cfb5a1610f0efb729bfc7f610a79babf01966444bacf916b3acb9" gracePeriod=600 Oct 02 11:41:51 crc kubenswrapper[4783]: E1002 11:41:51.634607 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:41:52 crc kubenswrapper[4783]: I1002 11:41:52.377296 4783 generic.go:334] "Generic (PLEG): container finished" podID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerID="16e46e78a08cfb5a1610f0efb729bfc7f610a79babf01966444bacf916b3acb9" exitCode=0 Oct 02 11:41:52 crc kubenswrapper[4783]: I1002 11:41:52.377351 4783 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" event={"ID":"3288cc82-59a8-408e-8b0e-b5255882b4fb","Type":"ContainerDied","Data":"16e46e78a08cfb5a1610f0efb729bfc7f610a79babf01966444bacf916b3acb9"} Oct 02 11:41:52 crc kubenswrapper[4783]: I1002 11:41:52.377665 4783 scope.go:117] "RemoveContainer" containerID="ef440e982275f435833cad6571cb19a94a65f2aa9dda0af7e8553bbcd9f7b29c" Oct 02 11:41:52 crc kubenswrapper[4783]: I1002 11:41:52.378855 4783 scope.go:117] "RemoveContainer" containerID="16e46e78a08cfb5a1610f0efb729bfc7f610a79babf01966444bacf916b3acb9" Oct 02 11:41:52 crc kubenswrapper[4783]: E1002 11:41:52.379358 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:42:06 crc kubenswrapper[4783]: I1002 11:42:06.545545 4783 scope.go:117] "RemoveContainer" containerID="16e46e78a08cfb5a1610f0efb729bfc7f610a79babf01966444bacf916b3acb9" Oct 02 11:42:06 crc kubenswrapper[4783]: E1002 11:42:06.546436 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:42:17 crc kubenswrapper[4783]: I1002 11:42:17.561229 4783 scope.go:117] "RemoveContainer" containerID="16e46e78a08cfb5a1610f0efb729bfc7f610a79babf01966444bacf916b3acb9" Oct 02 11:42:17 crc kubenswrapper[4783]: E1002 11:42:17.562097 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:42:32 crc kubenswrapper[4783]: I1002 11:42:32.544694 4783 scope.go:117] "RemoveContainer" containerID="16e46e78a08cfb5a1610f0efb729bfc7f610a79babf01966444bacf916b3acb9" Oct 02 11:42:32 crc kubenswrapper[4783]: E1002 11:42:32.545557 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:42:36 crc kubenswrapper[4783]: I1002 11:42:36.758000 4783 generic.go:334] "Generic (PLEG): container finished" podID="7f8d39f6-ae36-44ac-87ce-da7542ace825" containerID="5e20a05649a7a8cf2e209b5e8a0f39caeae24a5485117341ef2bd60af5b0c32d" exitCode=2 Oct 02 11:42:36 crc kubenswrapper[4783]: I1002 11:42:36.758089 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-xp2l6" event={"ID":"7f8d39f6-ae36-44ac-87ce-da7542ace825","Type":"ContainerDied","Data":"5e20a05649a7a8cf2e209b5e8a0f39caeae24a5485117341ef2bd60af5b0c32d"} Oct 02 11:42:38 crc kubenswrapper[4783]: I1002 11:42:38.115781 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-xp2l6" Oct 02 11:42:38 crc kubenswrapper[4783]: I1002 11:42:38.200731 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/7f8d39f6-ae36-44ac-87ce-da7542ace825-nova-migration-ssh-key-0\") pod \"7f8d39f6-ae36-44ac-87ce-da7542ace825\" (UID: \"7f8d39f6-ae36-44ac-87ce-da7542ace825\") " Oct 02 11:42:38 crc kubenswrapper[4783]: I1002 11:42:38.201090 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7f8d39f6-ae36-44ac-87ce-da7542ace825-ssh-key\") pod \"7f8d39f6-ae36-44ac-87ce-da7542ace825\" (UID: \"7f8d39f6-ae36-44ac-87ce-da7542ace825\") " Oct 02 11:42:38 crc kubenswrapper[4783]: I1002 11:42:38.201248 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/7f8d39f6-ae36-44ac-87ce-da7542ace825-nova-migration-ssh-key-1\") pod \"7f8d39f6-ae36-44ac-87ce-da7542ace825\" (UID: \"7f8d39f6-ae36-44ac-87ce-da7542ace825\") " Oct 02 11:42:38 crc kubenswrapper[4783]: I1002 11:42:38.201372 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rs8vk\" (UniqueName: \"kubernetes.io/projected/7f8d39f6-ae36-44ac-87ce-da7542ace825-kube-api-access-rs8vk\") pod \"7f8d39f6-ae36-44ac-87ce-da7542ace825\" (UID: \"7f8d39f6-ae36-44ac-87ce-da7542ace825\") " Oct 02 11:42:38 crc kubenswrapper[4783]: I1002 11:42:38.201531 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/7f8d39f6-ae36-44ac-87ce-da7542ace825-nova-extra-config-0\") pod \"7f8d39f6-ae36-44ac-87ce-da7542ace825\" (UID: \"7f8d39f6-ae36-44ac-87ce-da7542ace825\") " Oct 02 11:42:38 crc kubenswrapper[4783]: I1002 11:42:38.201940 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/7f8d39f6-ae36-44ac-87ce-da7542ace825-nova-cell1-compute-config-0\") pod \"7f8d39f6-ae36-44ac-87ce-da7542ace825\" (UID: \"7f8d39f6-ae36-44ac-87ce-da7542ace825\") " Oct 02 11:42:38 crc kubenswrapper[4783]: I1002 11:42:38.202084 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/7f8d39f6-ae36-44ac-87ce-da7542ace825-nova-cell1-compute-config-1\") pod \"7f8d39f6-ae36-44ac-87ce-da7542ace825\" (UID: \"7f8d39f6-ae36-44ac-87ce-da7542ace825\") " Oct 02 11:42:38 crc kubenswrapper[4783]: I1002 11:42:38.202197 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7f8d39f6-ae36-44ac-87ce-da7542ace825-inventory\") pod \"7f8d39f6-ae36-44ac-87ce-da7542ace825\" (UID: \"7f8d39f6-ae36-44ac-87ce-da7542ace825\") " Oct 02 11:42:38 crc kubenswrapper[4783]: I1002 11:42:38.202338 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/7f8d39f6-ae36-44ac-87ce-da7542ace825-nova-combined-ca-bundle\") pod \"7f8d39f6-ae36-44ac-87ce-da7542ace825\" (UID: \"7f8d39f6-ae36-44ac-87ce-da7542ace825\") " Oct 02 11:42:38 crc kubenswrapper[4783]: I1002 11:42:38.217722 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7f8d39f6-ae36-44ac-87ce-da7542ace825-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "7f8d39f6-ae36-44ac-87ce-da7542ace825" (UID: "7f8d39f6-ae36-44ac-87ce-da7542ace825"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:42:38 crc kubenswrapper[4783]: I1002 11:42:38.218297 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7f8d39f6-ae36-44ac-87ce-da7542ace825-kube-api-access-rs8vk" (OuterVolumeSpecName: "kube-api-access-rs8vk") pod "7f8d39f6-ae36-44ac-87ce-da7542ace825" (UID: "7f8d39f6-ae36-44ac-87ce-da7542ace825"). InnerVolumeSpecName "kube-api-access-rs8vk". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:42:38 crc kubenswrapper[4783]: I1002 11:42:38.226730 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7f8d39f6-ae36-44ac-87ce-da7542ace825-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "7f8d39f6-ae36-44ac-87ce-da7542ace825" (UID: "7f8d39f6-ae36-44ac-87ce-da7542ace825"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:42:38 crc kubenswrapper[4783]: I1002 11:42:38.229901 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7f8d39f6-ae36-44ac-87ce-da7542ace825-nova-extra-config-0" (OuterVolumeSpecName: "nova-extra-config-0") pod "7f8d39f6-ae36-44ac-87ce-da7542ace825" (UID: "7f8d39f6-ae36-44ac-87ce-da7542ace825"). InnerVolumeSpecName "nova-extra-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:42:38 crc kubenswrapper[4783]: I1002 11:42:38.238702 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7f8d39f6-ae36-44ac-87ce-da7542ace825-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "7f8d39f6-ae36-44ac-87ce-da7542ace825" (UID: "7f8d39f6-ae36-44ac-87ce-da7542ace825"). InnerVolumeSpecName "nova-migration-ssh-key-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:42:38 crc kubenswrapper[4783]: I1002 11:42:38.240644 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7f8d39f6-ae36-44ac-87ce-da7542ace825-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "7f8d39f6-ae36-44ac-87ce-da7542ace825" (UID: "7f8d39f6-ae36-44ac-87ce-da7542ace825"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:42:38 crc kubenswrapper[4783]: I1002 11:42:38.244546 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7f8d39f6-ae36-44ac-87ce-da7542ace825-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "7f8d39f6-ae36-44ac-87ce-da7542ace825" (UID: "7f8d39f6-ae36-44ac-87ce-da7542ace825"). InnerVolumeSpecName "nova-migration-ssh-key-1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:42:38 crc kubenswrapper[4783]: I1002 11:42:38.249362 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7f8d39f6-ae36-44ac-87ce-da7542ace825-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "7f8d39f6-ae36-44ac-87ce-da7542ace825" (UID: "7f8d39f6-ae36-44ac-87ce-da7542ace825"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:42:38 crc kubenswrapper[4783]: I1002 11:42:38.256176 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7f8d39f6-ae36-44ac-87ce-da7542ace825-inventory" (OuterVolumeSpecName: "inventory") pod "7f8d39f6-ae36-44ac-87ce-da7542ace825" (UID: "7f8d39f6-ae36-44ac-87ce-da7542ace825"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:42:38 crc kubenswrapper[4783]: I1002 11:42:38.306487 4783 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/7f8d39f6-ae36-44ac-87ce-da7542ace825-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 02 11:42:38 crc kubenswrapper[4783]: I1002 11:42:38.306524 4783 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/7f8d39f6-ae36-44ac-87ce-da7542ace825-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Oct 02 11:42:38 crc kubenswrapper[4783]: I1002 11:42:38.306539 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rs8vk\" (UniqueName: \"kubernetes.io/projected/7f8d39f6-ae36-44ac-87ce-da7542ace825-kube-api-access-rs8vk\") on node \"crc\" DevicePath \"\"" Oct 02 11:42:38 crc kubenswrapper[4783]: I1002 11:42:38.306551 4783 reconciler_common.go:293] "Volume detached for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/7f8d39f6-ae36-44ac-87ce-da7542ace825-nova-extra-config-0\") on node \"crc\" DevicePath \"\"" Oct 02 11:42:38 crc kubenswrapper[4783]: I1002 11:42:38.306561 4783 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/7f8d39f6-ae36-44ac-87ce-da7542ace825-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Oct 02 11:42:38 crc kubenswrapper[4783]: I1002 11:42:38.306572 4783 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/7f8d39f6-ae36-44ac-87ce-da7542ace825-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Oct 02 11:42:38 crc kubenswrapper[4783]: I1002 11:42:38.306583 4783 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/7f8d39f6-ae36-44ac-87ce-da7542ace825-inventory\") on node \"crc\" DevicePath \"\"" Oct 02 11:42:38 crc kubenswrapper[4783]: I1002 11:42:38.306593 4783 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7f8d39f6-ae36-44ac-87ce-da7542ace825-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 11:42:38 crc kubenswrapper[4783]: I1002 11:42:38.306604 4783 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/7f8d39f6-ae36-44ac-87ce-da7542ace825-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Oct 02 11:42:38 crc kubenswrapper[4783]: I1002 11:42:38.788331 4783 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-xp2l6" event={"ID":"7f8d39f6-ae36-44ac-87ce-da7542ace825","Type":"ContainerDied","Data":"7371cd1a1944674ef82ded4c620401b3c144a8620dab4c876de70f559db3b5ab"} Oct 02 11:42:38 crc kubenswrapper[4783]: I1002 11:42:38.788386 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7371cd1a1944674ef82ded4c620401b3c144a8620dab4c876de70f559db3b5ab" Oct 02 11:42:38 crc kubenswrapper[4783]: I1002 11:42:38.788433 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-xp2l6" Oct 02 11:42:43 crc kubenswrapper[4783]: I1002 11:42:43.544737 4783 scope.go:117] "RemoveContainer" containerID="16e46e78a08cfb5a1610f0efb729bfc7f610a79babf01966444bacf916b3acb9" Oct 02 11:42:43 crc kubenswrapper[4783]: E1002 11:42:43.545545 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.038651 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-8fx67"] Oct 02 11:42:46 crc kubenswrapper[4783]: E1002 11:42:46.039108 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f43840a-f4c5-42df-a160-b931a2348c7a" containerName="extract-content" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.039128 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f43840a-f4c5-42df-a160-b931a2348c7a" containerName="extract-content" Oct 02 11:42:46 crc kubenswrapper[4783]: E1002 11:42:46.039148 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f43840a-f4c5-42df-a160-b931a2348c7a" containerName="extract-utilities" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.039157 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f43840a-f4c5-42df-a160-b931a2348c7a" containerName="extract-utilities" Oct 02 11:42:46 crc kubenswrapper[4783]: E1002 11:42:46.039172 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f43840a-f4c5-42df-a160-b931a2348c7a" containerName="registry-server" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.039180 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f43840a-f4c5-42df-a160-b931a2348c7a" containerName="registry-server" Oct 02 11:42:46 crc kubenswrapper[4783]: E1002 11:42:46.039192 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b0fc9d5-91fa-465e-8cf1-107248e61ea1" containerName="registry-server" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.039200 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b0fc9d5-91fa-465e-8cf1-107248e61ea1" containerName="registry-server" Oct 02 11:42:46 crc kubenswrapper[4783]: E1002 11:42:46.039224 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f8d39f6-ae36-44ac-87ce-da7542ace825" containerName="nova-edpm-deployment-openstack-edpm-ipam" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.039233 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f8d39f6-ae36-44ac-87ce-da7542ace825" containerName="nova-edpm-deployment-openstack-edpm-ipam" Oct 02 
11:42:46 crc kubenswrapper[4783]: E1002 11:42:46.039244 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29a87f3e-4167-42e2-95fa-91cd28df9e05" containerName="extract-content" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.039254 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="29a87f3e-4167-42e2-95fa-91cd28df9e05" containerName="extract-content" Oct 02 11:42:46 crc kubenswrapper[4783]: E1002 11:42:46.039271 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29a87f3e-4167-42e2-95fa-91cd28df9e05" containerName="registry-server" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.039279 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="29a87f3e-4167-42e2-95fa-91cd28df9e05" containerName="registry-server" Oct 02 11:42:46 crc kubenswrapper[4783]: E1002 11:42:46.039287 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="29a87f3e-4167-42e2-95fa-91cd28df9e05" containerName="extract-utilities" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.039294 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="29a87f3e-4167-42e2-95fa-91cd28df9e05" containerName="extract-utilities" Oct 02 11:42:46 crc kubenswrapper[4783]: E1002 11:42:46.039305 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4692d534-cd1c-4ee0-97ba-4edd45d26f51" containerName="registry-server" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.039312 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="4692d534-cd1c-4ee0-97ba-4edd45d26f51" containerName="registry-server" Oct 02 11:42:46 crc kubenswrapper[4783]: E1002 11:42:46.039323 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b0fc9d5-91fa-465e-8cf1-107248e61ea1" containerName="extract-utilities" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.039331 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b0fc9d5-91fa-465e-8cf1-107248e61ea1" containerName="extract-utilities" Oct 02 11:42:46 crc kubenswrapper[4783]: E1002 11:42:46.039348 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4692d534-cd1c-4ee0-97ba-4edd45d26f51" containerName="extract-utilities" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.039355 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="4692d534-cd1c-4ee0-97ba-4edd45d26f51" containerName="extract-utilities" Oct 02 11:42:46 crc kubenswrapper[4783]: E1002 11:42:46.039364 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4692d534-cd1c-4ee0-97ba-4edd45d26f51" containerName="extract-content" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.039372 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="4692d534-cd1c-4ee0-97ba-4edd45d26f51" containerName="extract-content" Oct 02 11:42:46 crc kubenswrapper[4783]: E1002 11:42:46.039391 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2b0fc9d5-91fa-465e-8cf1-107248e61ea1" containerName="extract-content" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.039399 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="2b0fc9d5-91fa-465e-8cf1-107248e61ea1" containerName="extract-content" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.039643 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="4692d534-cd1c-4ee0-97ba-4edd45d26f51" containerName="registry-server" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.039656 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f43840a-f4c5-42df-a160-b931a2348c7a" 
containerName="registry-server" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.039667 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f8d39f6-ae36-44ac-87ce-da7542ace825" containerName="nova-edpm-deployment-openstack-edpm-ipam" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.039690 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="29a87f3e-4167-42e2-95fa-91cd28df9e05" containerName="registry-server" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.039700 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="2b0fc9d5-91fa-465e-8cf1-107248e61ea1" containerName="registry-server" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.040487 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-8fx67" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.052255 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.055383 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.055541 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-extra-config" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.055601 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.056377 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.056583 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.056671 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-n5lmz" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.058547 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-8fx67"] Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.156067 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/e68279d9-8229-404b-9d1f-f5963f2e7995-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-8fx67\" (UID: \"e68279d9-8229-404b-9d1f-f5963f2e7995\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-8fx67" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.156122 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/e68279d9-8229-404b-9d1f-f5963f2e7995-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-8fx67\" (UID: \"e68279d9-8229-404b-9d1f-f5963f2e7995\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-8fx67" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.156178 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v8fzr\" (UniqueName: \"kubernetes.io/projected/e68279d9-8229-404b-9d1f-f5963f2e7995-kube-api-access-v8fzr\") pod \"nova-edpm-deployment-openstack-edpm-ipam-8fx67\" (UID: 
\"e68279d9-8229-404b-9d1f-f5963f2e7995\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-8fx67" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.156250 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e68279d9-8229-404b-9d1f-f5963f2e7995-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-8fx67\" (UID: \"e68279d9-8229-404b-9d1f-f5963f2e7995\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-8fx67" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.156294 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/e68279d9-8229-404b-9d1f-f5963f2e7995-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-8fx67\" (UID: \"e68279d9-8229-404b-9d1f-f5963f2e7995\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-8fx67" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.156328 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e68279d9-8229-404b-9d1f-f5963f2e7995-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-8fx67\" (UID: \"e68279d9-8229-404b-9d1f-f5963f2e7995\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-8fx67" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.156479 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/e68279d9-8229-404b-9d1f-f5963f2e7995-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-8fx67\" (UID: \"e68279d9-8229-404b-9d1f-f5963f2e7995\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-8fx67" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.156568 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e68279d9-8229-404b-9d1f-f5963f2e7995-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-8fx67\" (UID: \"e68279d9-8229-404b-9d1f-f5963f2e7995\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-8fx67" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.156616 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/e68279d9-8229-404b-9d1f-f5963f2e7995-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-8fx67\" (UID: \"e68279d9-8229-404b-9d1f-f5963f2e7995\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-8fx67" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.258090 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/e68279d9-8229-404b-9d1f-f5963f2e7995-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-8fx67\" (UID: \"e68279d9-8229-404b-9d1f-f5963f2e7995\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-8fx67" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.258198 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/e68279d9-8229-404b-9d1f-f5963f2e7995-nova-cell1-compute-config-1\") pod 
\"nova-edpm-deployment-openstack-edpm-ipam-8fx67\" (UID: \"e68279d9-8229-404b-9d1f-f5963f2e7995\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-8fx67" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.258226 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/e68279d9-8229-404b-9d1f-f5963f2e7995-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-8fx67\" (UID: \"e68279d9-8229-404b-9d1f-f5963f2e7995\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-8fx67" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.258276 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v8fzr\" (UniqueName: \"kubernetes.io/projected/e68279d9-8229-404b-9d1f-f5963f2e7995-kube-api-access-v8fzr\") pod \"nova-edpm-deployment-openstack-edpm-ipam-8fx67\" (UID: \"e68279d9-8229-404b-9d1f-f5963f2e7995\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-8fx67" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.258299 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e68279d9-8229-404b-9d1f-f5963f2e7995-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-8fx67\" (UID: \"e68279d9-8229-404b-9d1f-f5963f2e7995\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-8fx67" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.258495 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/e68279d9-8229-404b-9d1f-f5963f2e7995-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-8fx67\" (UID: \"e68279d9-8229-404b-9d1f-f5963f2e7995\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-8fx67" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.258556 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e68279d9-8229-404b-9d1f-f5963f2e7995-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-8fx67\" (UID: \"e68279d9-8229-404b-9d1f-f5963f2e7995\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-8fx67" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.258635 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/e68279d9-8229-404b-9d1f-f5963f2e7995-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-8fx67\" (UID: \"e68279d9-8229-404b-9d1f-f5963f2e7995\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-8fx67" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.258714 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e68279d9-8229-404b-9d1f-f5963f2e7995-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-8fx67\" (UID: \"e68279d9-8229-404b-9d1f-f5963f2e7995\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-8fx67" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.260998 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/e68279d9-8229-404b-9d1f-f5963f2e7995-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-8fx67\" (UID: \"e68279d9-8229-404b-9d1f-f5963f2e7995\") " 
pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-8fx67" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.266660 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e68279d9-8229-404b-9d1f-f5963f2e7995-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-8fx67\" (UID: \"e68279d9-8229-404b-9d1f-f5963f2e7995\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-8fx67" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.269051 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e68279d9-8229-404b-9d1f-f5963f2e7995-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-8fx67\" (UID: \"e68279d9-8229-404b-9d1f-f5963f2e7995\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-8fx67" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.269380 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e68279d9-8229-404b-9d1f-f5963f2e7995-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-8fx67\" (UID: \"e68279d9-8229-404b-9d1f-f5963f2e7995\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-8fx67" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.269404 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/e68279d9-8229-404b-9d1f-f5963f2e7995-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-8fx67\" (UID: \"e68279d9-8229-404b-9d1f-f5963f2e7995\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-8fx67" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.270028 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/e68279d9-8229-404b-9d1f-f5963f2e7995-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-8fx67\" (UID: \"e68279d9-8229-404b-9d1f-f5963f2e7995\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-8fx67" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.271388 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/e68279d9-8229-404b-9d1f-f5963f2e7995-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-8fx67\" (UID: \"e68279d9-8229-404b-9d1f-f5963f2e7995\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-8fx67" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.272867 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/e68279d9-8229-404b-9d1f-f5963f2e7995-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-8fx67\" (UID: \"e68279d9-8229-404b-9d1f-f5963f2e7995\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-8fx67" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.284111 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v8fzr\" (UniqueName: \"kubernetes.io/projected/e68279d9-8229-404b-9d1f-f5963f2e7995-kube-api-access-v8fzr\") pod \"nova-edpm-deployment-openstack-edpm-ipam-8fx67\" (UID: \"e68279d9-8229-404b-9d1f-f5963f2e7995\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-8fx67" Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.386479 4783 
util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-8fx67"
Oct 02 11:42:46 crc kubenswrapper[4783]: I1002 11:42:46.956062 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-8fx67"]
Oct 02 11:42:47 crc kubenswrapper[4783]: I1002 11:42:47.878721 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-8fx67" event={"ID":"e68279d9-8229-404b-9d1f-f5963f2e7995","Type":"ContainerStarted","Data":"c0c2c729743259af3704df6aa51f7c2d88cf2acbfc8ede8762c2234e7357749f"}
Oct 02 11:42:47 crc kubenswrapper[4783]: I1002 11:42:47.879564 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-8fx67" event={"ID":"e68279d9-8229-404b-9d1f-f5963f2e7995","Type":"ContainerStarted","Data":"fc5be9db224b1beb3e75c6442948852047e99d0ee56ebb33d5bece2c97bed27a"}
Oct 02 11:42:58 crc kubenswrapper[4783]: I1002 11:42:58.546123 4783 scope.go:117] "RemoveContainer" containerID="16e46e78a08cfb5a1610f0efb729bfc7f610a79babf01966444bacf916b3acb9"
Oct 02 11:42:58 crc kubenswrapper[4783]: E1002 11:42:58.546995 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 11:43:10 crc kubenswrapper[4783]: I1002 11:43:10.545478 4783 scope.go:117] "RemoveContainer" containerID="16e46e78a08cfb5a1610f0efb729bfc7f610a79babf01966444bacf916b3acb9"
Oct 02 11:43:10 crc kubenswrapper[4783]: E1002 11:43:10.546185 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 11:43:25 crc kubenswrapper[4783]: I1002 11:43:25.546756 4783 scope.go:117] "RemoveContainer" containerID="16e46e78a08cfb5a1610f0efb729bfc7f610a79babf01966444bacf916b3acb9"
Oct 02 11:43:25 crc kubenswrapper[4783]: E1002 11:43:25.547399 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 11:43:40 crc kubenswrapper[4783]: I1002 11:43:40.545634 4783 scope.go:117] "RemoveContainer" containerID="16e46e78a08cfb5a1610f0efb729bfc7f610a79babf01966444bacf916b3acb9"
Oct 02 11:43:40 crc kubenswrapper[4783]: E1002 11:43:40.550045 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 11:43:52 crc kubenswrapper[4783]: I1002 11:43:52.545455 4783 scope.go:117] "RemoveContainer" containerID="16e46e78a08cfb5a1610f0efb729bfc7f610a79babf01966444bacf916b3acb9"
Oct 02 11:43:52 crc kubenswrapper[4783]: E1002 11:43:52.546324 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 11:43:58 crc kubenswrapper[4783]: I1002 11:43:58.470328 4783 generic.go:334] "Generic (PLEG): container finished" podID="e68279d9-8229-404b-9d1f-f5963f2e7995" containerID="c0c2c729743259af3704df6aa51f7c2d88cf2acbfc8ede8762c2234e7357749f" exitCode=2
Oct 02 11:43:58 crc kubenswrapper[4783]: I1002 11:43:58.470882 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-8fx67" event={"ID":"e68279d9-8229-404b-9d1f-f5963f2e7995","Type":"ContainerDied","Data":"c0c2c729743259af3704df6aa51f7c2d88cf2acbfc8ede8762c2234e7357749f"}
Oct 02 11:43:59 crc kubenswrapper[4783]: I1002 11:43:59.874493 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-8fx67"
Oct 02 11:43:59 crc kubenswrapper[4783]: I1002 11:43:59.961166 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/e68279d9-8229-404b-9d1f-f5963f2e7995-nova-cell1-compute-config-1\") pod \"e68279d9-8229-404b-9d1f-f5963f2e7995\" (UID: \"e68279d9-8229-404b-9d1f-f5963f2e7995\") "
Oct 02 11:43:59 crc kubenswrapper[4783]: I1002 11:43:59.961261 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/e68279d9-8229-404b-9d1f-f5963f2e7995-nova-cell1-compute-config-0\") pod \"e68279d9-8229-404b-9d1f-f5963f2e7995\" (UID: \"e68279d9-8229-404b-9d1f-f5963f2e7995\") "
Oct 02 11:43:59 crc kubenswrapper[4783]: I1002 11:43:59.961332 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/e68279d9-8229-404b-9d1f-f5963f2e7995-nova-extra-config-0\") pod \"e68279d9-8229-404b-9d1f-f5963f2e7995\" (UID: \"e68279d9-8229-404b-9d1f-f5963f2e7995\") "
Oct 02 11:43:59 crc kubenswrapper[4783]: I1002 11:43:59.961352 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/e68279d9-8229-404b-9d1f-f5963f2e7995-nova-migration-ssh-key-0\") pod \"e68279d9-8229-404b-9d1f-f5963f2e7995\" (UID: \"e68279d9-8229-404b-9d1f-f5963f2e7995\") "
Oct 02 11:43:59 crc kubenswrapper[4783]: I1002 11:43:59.961405 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v8fzr\" (UniqueName: \"kubernetes.io/projected/e68279d9-8229-404b-9d1f-f5963f2e7995-kube-api-access-v8fzr\") pod \"e68279d9-8229-404b-9d1f-f5963f2e7995\" (UID: \"e68279d9-8229-404b-9d1f-f5963f2e7995\") "
Oct 02 11:43:59 crc kubenswrapper[4783]: I1002 11:43:59.961472 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e68279d9-8229-404b-9d1f-f5963f2e7995-ssh-key\") pod \"e68279d9-8229-404b-9d1f-f5963f2e7995\" (UID: \"e68279d9-8229-404b-9d1f-f5963f2e7995\") "
Oct 02 11:43:59 crc kubenswrapper[4783]: I1002 11:43:59.961507 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e68279d9-8229-404b-9d1f-f5963f2e7995-nova-combined-ca-bundle\") pod \"e68279d9-8229-404b-9d1f-f5963f2e7995\" (UID: \"e68279d9-8229-404b-9d1f-f5963f2e7995\") "
Oct 02 11:43:59 crc kubenswrapper[4783]: I1002 11:43:59.961601 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e68279d9-8229-404b-9d1f-f5963f2e7995-inventory\") pod \"e68279d9-8229-404b-9d1f-f5963f2e7995\" (UID: \"e68279d9-8229-404b-9d1f-f5963f2e7995\") "
Oct 02 11:43:59 crc kubenswrapper[4783]: I1002 11:43:59.961614 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/e68279d9-8229-404b-9d1f-f5963f2e7995-nova-migration-ssh-key-1\") pod \"e68279d9-8229-404b-9d1f-f5963f2e7995\" (UID: \"e68279d9-8229-404b-9d1f-f5963f2e7995\") "
Oct 02 11:43:59 crc kubenswrapper[4783]: I1002 11:43:59.969138 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e68279d9-8229-404b-9d1f-f5963f2e7995-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "e68279d9-8229-404b-9d1f-f5963f2e7995" (UID: "e68279d9-8229-404b-9d1f-f5963f2e7995"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:43:59 crc kubenswrapper[4783]: I1002 11:43:59.985217 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e68279d9-8229-404b-9d1f-f5963f2e7995-kube-api-access-v8fzr" (OuterVolumeSpecName: "kube-api-access-v8fzr") pod "e68279d9-8229-404b-9d1f-f5963f2e7995" (UID: "e68279d9-8229-404b-9d1f-f5963f2e7995"). InnerVolumeSpecName "kube-api-access-v8fzr". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 02 11:43:59 crc kubenswrapper[4783]: I1002 11:43:59.986874 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e68279d9-8229-404b-9d1f-f5963f2e7995-nova-extra-config-0" (OuterVolumeSpecName: "nova-extra-config-0") pod "e68279d9-8229-404b-9d1f-f5963f2e7995" (UID: "e68279d9-8229-404b-9d1f-f5963f2e7995"). InnerVolumeSpecName "nova-extra-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 02 11:43:59 crc kubenswrapper[4783]: I1002 11:43:59.991678 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e68279d9-8229-404b-9d1f-f5963f2e7995-inventory" (OuterVolumeSpecName: "inventory") pod "e68279d9-8229-404b-9d1f-f5963f2e7995" (UID: "e68279d9-8229-404b-9d1f-f5963f2e7995"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:43:59 crc kubenswrapper[4783]: I1002 11:43:59.993522 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e68279d9-8229-404b-9d1f-f5963f2e7995-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "e68279d9-8229-404b-9d1f-f5963f2e7995" (UID: "e68279d9-8229-404b-9d1f-f5963f2e7995"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:43:59 crc kubenswrapper[4783]: I1002 11:43:59.995202 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e68279d9-8229-404b-9d1f-f5963f2e7995-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "e68279d9-8229-404b-9d1f-f5963f2e7995" (UID: "e68279d9-8229-404b-9d1f-f5963f2e7995"). InnerVolumeSpecName "nova-migration-ssh-key-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:43:59 crc kubenswrapper[4783]: I1002 11:43:59.998168 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e68279d9-8229-404b-9d1f-f5963f2e7995-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "e68279d9-8229-404b-9d1f-f5963f2e7995" (UID: "e68279d9-8229-404b-9d1f-f5963f2e7995"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:44:00 crc kubenswrapper[4783]: I1002 11:44:00.004277 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e68279d9-8229-404b-9d1f-f5963f2e7995-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "e68279d9-8229-404b-9d1f-f5963f2e7995" (UID: "e68279d9-8229-404b-9d1f-f5963f2e7995"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:44:00 crc kubenswrapper[4783]: I1002 11:44:00.017542 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e68279d9-8229-404b-9d1f-f5963f2e7995-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "e68279d9-8229-404b-9d1f-f5963f2e7995" (UID: "e68279d9-8229-404b-9d1f-f5963f2e7995"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:44:00 crc kubenswrapper[4783]: I1002 11:44:00.064953 4783 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/e68279d9-8229-404b-9d1f-f5963f2e7995-inventory\") on node \"crc\" DevicePath \"\""
Oct 02 11:44:00 crc kubenswrapper[4783]: I1002 11:44:00.065022 4783 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/e68279d9-8229-404b-9d1f-f5963f2e7995-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\""
Oct 02 11:44:00 crc kubenswrapper[4783]: I1002 11:44:00.065033 4783 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/e68279d9-8229-404b-9d1f-f5963f2e7995-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\""
Oct 02 11:44:00 crc kubenswrapper[4783]: I1002 11:44:00.065043 4783 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/e68279d9-8229-404b-9d1f-f5963f2e7995-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\""
Oct 02 11:44:00 crc kubenswrapper[4783]: I1002 11:44:00.065053 4783 reconciler_common.go:293] "Volume detached for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/e68279d9-8229-404b-9d1f-f5963f2e7995-nova-extra-config-0\") on node \"crc\" DevicePath \"\""
Oct 02 11:44:00 crc kubenswrapper[4783]: I1002 11:44:00.065080 4783 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/e68279d9-8229-404b-9d1f-f5963f2e7995-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\""
Oct 02 11:44:00 crc kubenswrapper[4783]: I1002 11:44:00.065088 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v8fzr\" (UniqueName: \"kubernetes.io/projected/e68279d9-8229-404b-9d1f-f5963f2e7995-kube-api-access-v8fzr\") on node \"crc\" DevicePath \"\""
Oct 02 11:44:00 crc kubenswrapper[4783]: I1002 11:44:00.065096 4783 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/e68279d9-8229-404b-9d1f-f5963f2e7995-ssh-key\") on node \"crc\" DevicePath \"\""
Oct 02 11:44:00 crc kubenswrapper[4783]: I1002 11:44:00.065104 4783 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e68279d9-8229-404b-9d1f-f5963f2e7995-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Oct 02 11:44:00 crc kubenswrapper[4783]: I1002 11:44:00.500066 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-8fx67" event={"ID":"e68279d9-8229-404b-9d1f-f5963f2e7995","Type":"ContainerDied","Data":"fc5be9db224b1beb3e75c6442948852047e99d0ee56ebb33d5bece2c97bed27a"}
Oct 02 11:44:00 crc kubenswrapper[4783]: I1002 11:44:00.500122 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fc5be9db224b1beb3e75c6442948852047e99d0ee56ebb33d5bece2c97bed27a"
Oct 02 11:44:00 crc kubenswrapper[4783]: I1002 11:44:00.500469 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-8fx67"
Oct 02 11:44:04 crc kubenswrapper[4783]: I1002 11:44:04.544774 4783 scope.go:117] "RemoveContainer" containerID="16e46e78a08cfb5a1610f0efb729bfc7f610a79babf01966444bacf916b3acb9"
Oct 02 11:44:04 crc kubenswrapper[4783]: E1002 11:44:04.545564 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 11:44:17 crc kubenswrapper[4783]: I1002 11:44:17.034573 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-v8t7p"]
Oct 02 11:44:17 crc kubenswrapper[4783]: E1002 11:44:17.036676 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e68279d9-8229-404b-9d1f-f5963f2e7995" containerName="nova-edpm-deployment-openstack-edpm-ipam"
Oct 02 11:44:17 crc kubenswrapper[4783]: I1002 11:44:17.036763 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="e68279d9-8229-404b-9d1f-f5963f2e7995" containerName="nova-edpm-deployment-openstack-edpm-ipam"
Oct 02 11:44:17 crc kubenswrapper[4783]: I1002 11:44:17.037033 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="e68279d9-8229-404b-9d1f-f5963f2e7995" containerName="nova-edpm-deployment-openstack-edpm-ipam"
Oct 02 11:44:17 crc kubenswrapper[4783]: I1002 11:44:17.037824 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-v8t7p"
Oct 02 11:44:17 crc kubenswrapper[4783]: I1002 11:44:17.041140 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Oct 02 11:44:17 crc kubenswrapper[4783]: I1002 11:44:17.041871 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-extra-config"
Oct 02 11:44:17 crc kubenswrapper[4783]: I1002 11:44:17.042459 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config"
Oct 02 11:44:17 crc kubenswrapper[4783]: I1002 11:44:17.043974 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-v8t7p"]
Oct 02 11:44:17 crc kubenswrapper[4783]: I1002 11:44:17.044528 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Oct 02 11:44:17 crc kubenswrapper[4783]: I1002 11:44:17.044611 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key"
Oct 02 11:44:17 crc kubenswrapper[4783]: I1002 11:44:17.044641 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Oct 02 11:44:17 crc kubenswrapper[4783]: I1002 11:44:17.045176 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-n5lmz"
Oct 02 11:44:17 crc kubenswrapper[4783]: I1002 11:44:17.073711 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-v8t7p\" (UID: \"fa70f2b8-5bce-4ebe-a067-66d7dd57e787\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-v8t7p"
Oct 02 11:44:17 crc kubenswrapper[4783]: I1002 11:44:17.073792 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-v8t7p\" (UID: \"fa70f2b8-5bce-4ebe-a067-66d7dd57e787\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-v8t7p"
Oct 02 11:44:17 crc kubenswrapper[4783]: I1002 11:44:17.073826 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-v8t7p\" (UID: \"fa70f2b8-5bce-4ebe-a067-66d7dd57e787\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-v8t7p"
Oct 02 11:44:17 crc kubenswrapper[4783]: I1002 11:44:17.073850 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6pp6s\" (UniqueName: \"kubernetes.io/projected/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-kube-api-access-6pp6s\") pod \"nova-edpm-deployment-openstack-edpm-ipam-v8t7p\" (UID: \"fa70f2b8-5bce-4ebe-a067-66d7dd57e787\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-v8t7p"
Oct 02 11:44:17 crc kubenswrapper[4783]: I1002 11:44:17.073873 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-v8t7p\" (UID: \"fa70f2b8-5bce-4ebe-a067-66d7dd57e787\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-v8t7p"
Oct 02 11:44:17 crc kubenswrapper[4783]: I1002 11:44:17.073898 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-v8t7p\" (UID: \"fa70f2b8-5bce-4ebe-a067-66d7dd57e787\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-v8t7p"
Oct 02 11:44:17 crc kubenswrapper[4783]: I1002 11:44:17.073915 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-v8t7p\" (UID: \"fa70f2b8-5bce-4ebe-a067-66d7dd57e787\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-v8t7p"
Oct 02 11:44:17 crc kubenswrapper[4783]: I1002 11:44:17.073968 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-v8t7p\" (UID: \"fa70f2b8-5bce-4ebe-a067-66d7dd57e787\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-v8t7p"
Oct 02 11:44:17 crc kubenswrapper[4783]: I1002 11:44:17.074005 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-v8t7p\" (UID: \"fa70f2b8-5bce-4ebe-a067-66d7dd57e787\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-v8t7p"
Oct 02 11:44:17 crc kubenswrapper[4783]: I1002 11:44:17.175700 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-v8t7p\" (UID: \"fa70f2b8-5bce-4ebe-a067-66d7dd57e787\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-v8t7p"
Oct 02 11:44:17 crc kubenswrapper[4783]: I1002 11:44:17.175795 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-v8t7p\" (UID: \"fa70f2b8-5bce-4ebe-a067-66d7dd57e787\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-v8t7p"
Oct 02 11:44:17 crc kubenswrapper[4783]: I1002 11:44:17.175853 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6pp6s\" (UniqueName: \"kubernetes.io/projected/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-kube-api-access-6pp6s\") pod \"nova-edpm-deployment-openstack-edpm-ipam-v8t7p\" (UID: \"fa70f2b8-5bce-4ebe-a067-66d7dd57e787\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-v8t7p"
Oct 02 11:44:17 crc kubenswrapper[4783]: I1002 11:44:17.175893 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-v8t7p\" (UID: \"fa70f2b8-5bce-4ebe-a067-66d7dd57e787\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-v8t7p"
Oct 02 11:44:17 crc kubenswrapper[4783]: I1002 11:44:17.176353 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-v8t7p\" (UID: \"fa70f2b8-5bce-4ebe-a067-66d7dd57e787\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-v8t7p"
Oct 02 11:44:17 crc kubenswrapper[4783]: I1002 11:44:17.176387 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-v8t7p\" (UID: \"fa70f2b8-5bce-4ebe-a067-66d7dd57e787\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-v8t7p"
Oct 02 11:44:17 crc kubenswrapper[4783]: I1002 11:44:17.177084 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-v8t7p\" (UID: \"fa70f2b8-5bce-4ebe-a067-66d7dd57e787\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-v8t7p"
Oct 02 11:44:17 crc kubenswrapper[4783]: I1002 11:44:17.177272 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-v8t7p\" (UID: \"fa70f2b8-5bce-4ebe-a067-66d7dd57e787\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-v8t7p"
Oct 02 11:44:17 crc kubenswrapper[4783]: I1002 11:44:17.177351 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-v8t7p\" (UID: \"fa70f2b8-5bce-4ebe-a067-66d7dd57e787\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-v8t7p"
Oct 02 11:44:17 crc kubenswrapper[4783]: I1002 11:44:17.177703 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-v8t7p\" (UID: \"fa70f2b8-5bce-4ebe-a067-66d7dd57e787\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-v8t7p"
Oct 02 11:44:17 crc kubenswrapper[4783]: I1002 11:44:17.182998 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-v8t7p\" (UID: \"fa70f2b8-5bce-4ebe-a067-66d7dd57e787\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-v8t7p"
Oct 02 11:44:17 crc kubenswrapper[4783]: I1002 11:44:17.184536 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-v8t7p\" (UID: \"fa70f2b8-5bce-4ebe-a067-66d7dd57e787\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-v8t7p"
Oct 02 11:44:17 crc kubenswrapper[4783]: I1002 11:44:17.184723 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-v8t7p\" (UID: \"fa70f2b8-5bce-4ebe-a067-66d7dd57e787\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-v8t7p"
Oct 02 11:44:17 crc kubenswrapper[4783]: I1002 11:44:17.189170 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-v8t7p\" (UID: \"fa70f2b8-5bce-4ebe-a067-66d7dd57e787\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-v8t7p"
Oct 02 11:44:17 crc kubenswrapper[4783]: I1002 11:44:17.189879 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-v8t7p\" (UID: \"fa70f2b8-5bce-4ebe-a067-66d7dd57e787\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-v8t7p"
Oct 02 11:44:17 crc kubenswrapper[4783]: I1002 11:44:17.190143 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-v8t7p\" (UID: \"fa70f2b8-5bce-4ebe-a067-66d7dd57e787\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-v8t7p"
Oct 02 11:44:17 crc kubenswrapper[4783]: I1002 11:44:17.193369 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-v8t7p\" (UID: \"fa70f2b8-5bce-4ebe-a067-66d7dd57e787\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-v8t7p"
Oct 02 11:44:17 crc kubenswrapper[4783]: I1002 11:44:17.196458 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6pp6s\" (UniqueName: \"kubernetes.io/projected/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-kube-api-access-6pp6s\") pod \"nova-edpm-deployment-openstack-edpm-ipam-v8t7p\" (UID: \"fa70f2b8-5bce-4ebe-a067-66d7dd57e787\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-v8t7p"
Oct 02 11:44:17 crc kubenswrapper[4783]: I1002 11:44:17.358620 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-v8t7p"
Oct 02 11:44:17 crc kubenswrapper[4783]: I1002 11:44:17.552201 4783 scope.go:117] "RemoveContainer" containerID="16e46e78a08cfb5a1610f0efb729bfc7f610a79babf01966444bacf916b3acb9"
Oct 02 11:44:17 crc kubenswrapper[4783]: E1002 11:44:17.552916 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 11:44:17 crc kubenswrapper[4783]: I1002 11:44:17.892444 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-v8t7p"]
Oct 02 11:44:17 crc kubenswrapper[4783]: I1002 11:44:17.900375 4783 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Oct 02 11:44:18 crc kubenswrapper[4783]: I1002 11:44:18.681880 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-v8t7p" event={"ID":"fa70f2b8-5bce-4ebe-a067-66d7dd57e787","Type":"ContainerStarted","Data":"598e05f18695be5ea6f3b567f2b9b1d3e45a74e6b50d400b5b63c60444d4078b"}
Oct 02 11:44:18 crc kubenswrapper[4783]: I1002 11:44:18.682289 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-v8t7p" event={"ID":"fa70f2b8-5bce-4ebe-a067-66d7dd57e787","Type":"ContainerStarted","Data":"de31000724d7bd1f4df91adea5547adca3fec526b10b1887e0ccfc8156add284"}
Oct 02 11:44:18 crc kubenswrapper[4783]: I1002 11:44:18.707823 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-v8t7p" podStartSLOduration=1.393801428 podStartE2EDuration="1.707800369s" podCreationTimestamp="2025-10-02 11:44:17 +0000 UTC" firstStartedPulling="2025-10-02 11:44:17.900163692 +0000 UTC m=+3091.216357953" lastFinishedPulling="2025-10-02 11:44:18.214162633 +0000 UTC m=+3091.530356894" observedRunningTime="2025-10-02 11:44:18.698893247 +0000 UTC m=+3092.015087508" watchObservedRunningTime="2025-10-02 11:44:18.707800369 +0000 UTC m=+3092.023994640"
Oct 02 11:44:28 crc kubenswrapper[4783]: I1002 11:44:28.544659 4783 scope.go:117] "RemoveContainer" containerID="16e46e78a08cfb5a1610f0efb729bfc7f610a79babf01966444bacf916b3acb9"
Oct 02 11:44:28 crc kubenswrapper[4783]: E1002 11:44:28.545498 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 11:44:42 crc kubenswrapper[4783]: I1002 11:44:42.545153 4783 scope.go:117] "RemoveContainer" containerID="16e46e78a08cfb5a1610f0efb729bfc7f610a79babf01966444bacf916b3acb9"
Oct 02 11:44:42 crc kubenswrapper[4783]: E1002 11:44:42.545970 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 11:44:53 crc kubenswrapper[4783]: I1002 11:44:53.546039 4783 scope.go:117] "RemoveContainer" containerID="16e46e78a08cfb5a1610f0efb729bfc7f610a79babf01966444bacf916b3acb9"
Oct 02 11:44:53 crc kubenswrapper[4783]: E1002 11:44:53.547088 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 11:45:00 crc kubenswrapper[4783]: I1002 11:45:00.160777 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29323425-5pd8k"]
Oct 02 11:45:00 crc kubenswrapper[4783]: I1002 11:45:00.162913 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29323425-5pd8k"
Oct 02 11:45:00 crc kubenswrapper[4783]: I1002 11:45:00.168002 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Oct 02 11:45:00 crc kubenswrapper[4783]: I1002 11:45:00.168263 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Oct 02 11:45:00 crc kubenswrapper[4783]: I1002 11:45:00.172147 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29323425-5pd8k"]
Oct 02 11:45:00 crc kubenswrapper[4783]: I1002 11:45:00.204129 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bpn4z\" (UniqueName: \"kubernetes.io/projected/c5338b7b-07a7-4d69-a213-eabbd45c454e-kube-api-access-bpn4z\") pod \"collect-profiles-29323425-5pd8k\" (UID: \"c5338b7b-07a7-4d69-a213-eabbd45c454e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323425-5pd8k"
Oct 02 11:45:00 crc kubenswrapper[4783]: I1002 11:45:00.204227 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c5338b7b-07a7-4d69-a213-eabbd45c454e-config-volume\") pod \"collect-profiles-29323425-5pd8k\" (UID: \"c5338b7b-07a7-4d69-a213-eabbd45c454e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323425-5pd8k"
Oct 02 11:45:00 crc kubenswrapper[4783]: I1002 11:45:00.204341 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c5338b7b-07a7-4d69-a213-eabbd45c454e-secret-volume\") pod \"collect-profiles-29323425-5pd8k\" (UID: \"c5338b7b-07a7-4d69-a213-eabbd45c454e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323425-5pd8k"
Oct 02 11:45:00 crc kubenswrapper[4783]: I1002 11:45:00.306647 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c5338b7b-07a7-4d69-a213-eabbd45c454e-secret-volume\") pod \"collect-profiles-29323425-5pd8k\" (UID: \"c5338b7b-07a7-4d69-a213-eabbd45c454e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323425-5pd8k"
Oct 02 11:45:00 crc kubenswrapper[4783]: I1002 11:45:00.306810 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bpn4z\" (UniqueName: \"kubernetes.io/projected/c5338b7b-07a7-4d69-a213-eabbd45c454e-kube-api-access-bpn4z\") pod \"collect-profiles-29323425-5pd8k\" (UID: \"c5338b7b-07a7-4d69-a213-eabbd45c454e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323425-5pd8k"
Oct 02 11:45:00 crc kubenswrapper[4783]: I1002 11:45:00.306863 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c5338b7b-07a7-4d69-a213-eabbd45c454e-config-volume\") pod \"collect-profiles-29323425-5pd8k\" (UID: \"c5338b7b-07a7-4d69-a213-eabbd45c454e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323425-5pd8k"
Oct 02 11:45:00 crc kubenswrapper[4783]: I1002 11:45:00.307719 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c5338b7b-07a7-4d69-a213-eabbd45c454e-config-volume\") pod \"collect-profiles-29323425-5pd8k\" (UID: \"c5338b7b-07a7-4d69-a213-eabbd45c454e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323425-5pd8k"
Oct 02 11:45:00 crc kubenswrapper[4783]: I1002 11:45:00.312658 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c5338b7b-07a7-4d69-a213-eabbd45c454e-secret-volume\") pod \"collect-profiles-29323425-5pd8k\" (UID: \"c5338b7b-07a7-4d69-a213-eabbd45c454e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323425-5pd8k"
Oct 02 11:45:00 crc kubenswrapper[4783]: I1002 11:45:00.325341 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bpn4z\" (UniqueName: \"kubernetes.io/projected/c5338b7b-07a7-4d69-a213-eabbd45c454e-kube-api-access-bpn4z\") pod \"collect-profiles-29323425-5pd8k\" (UID: \"c5338b7b-07a7-4d69-a213-eabbd45c454e\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323425-5pd8k"
Oct 02 11:45:00 crc kubenswrapper[4783]: I1002 11:45:00.487841 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29323425-5pd8k"
Oct 02 11:45:00 crc kubenswrapper[4783]: I1002 11:45:00.947000 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29323425-5pd8k"]
Oct 02 11:45:00 crc kubenswrapper[4783]: W1002 11:45:00.951744 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc5338b7b_07a7_4d69_a213_eabbd45c454e.slice/crio-91019152b2766ffebd7060777b7a9430ae000e2af9dbd1dc11a1c6fea8fe9cde WatchSource:0}: Error finding container 91019152b2766ffebd7060777b7a9430ae000e2af9dbd1dc11a1c6fea8fe9cde: Status 404 returned error can't find the container with id 91019152b2766ffebd7060777b7a9430ae000e2af9dbd1dc11a1c6fea8fe9cde
Oct 02 11:45:01 crc kubenswrapper[4783]: I1002 11:45:01.061151 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29323425-5pd8k" event={"ID":"c5338b7b-07a7-4d69-a213-eabbd45c454e","Type":"ContainerStarted","Data":"91019152b2766ffebd7060777b7a9430ae000e2af9dbd1dc11a1c6fea8fe9cde"}
Oct 02 11:45:02 crc kubenswrapper[4783]: I1002 11:45:02.071026 4783 generic.go:334] "Generic (PLEG): container finished" podID="c5338b7b-07a7-4d69-a213-eabbd45c454e" containerID="6d03e9cb0d48a2a7c5cfb59239f11da550441cd61850508eb7526f59a9b82637" exitCode=0
Oct 02 11:45:02 crc kubenswrapper[4783]: I1002 11:45:02.071129 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29323425-5pd8k" event={"ID":"c5338b7b-07a7-4d69-a213-eabbd45c454e","Type":"ContainerDied","Data":"6d03e9cb0d48a2a7c5cfb59239f11da550441cd61850508eb7526f59a9b82637"}
Oct 02 11:45:03 crc kubenswrapper[4783]: I1002 11:45:03.408327 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29323425-5pd8k"
Oct 02 11:45:03 crc kubenswrapper[4783]: I1002 11:45:03.475223 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c5338b7b-07a7-4d69-a213-eabbd45c454e-secret-volume\") pod \"c5338b7b-07a7-4d69-a213-eabbd45c454e\" (UID: \"c5338b7b-07a7-4d69-a213-eabbd45c454e\") "
Oct 02 11:45:03 crc kubenswrapper[4783]: I1002 11:45:03.475715 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c5338b7b-07a7-4d69-a213-eabbd45c454e-config-volume\") pod \"c5338b7b-07a7-4d69-a213-eabbd45c454e\" (UID: \"c5338b7b-07a7-4d69-a213-eabbd45c454e\") "
Oct 02 11:45:03 crc kubenswrapper[4783]: I1002 11:45:03.475939 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bpn4z\" (UniqueName: \"kubernetes.io/projected/c5338b7b-07a7-4d69-a213-eabbd45c454e-kube-api-access-bpn4z\") pod \"c5338b7b-07a7-4d69-a213-eabbd45c454e\" (UID: \"c5338b7b-07a7-4d69-a213-eabbd45c454e\") "
Oct 02 11:45:03 crc kubenswrapper[4783]: I1002 11:45:03.477997 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c5338b7b-07a7-4d69-a213-eabbd45c454e-config-volume" (OuterVolumeSpecName: "config-volume") pod "c5338b7b-07a7-4d69-a213-eabbd45c454e" (UID: "c5338b7b-07a7-4d69-a213-eabbd45c454e"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 02 11:45:03 crc kubenswrapper[4783]: I1002 11:45:03.484227 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c5338b7b-07a7-4d69-a213-eabbd45c454e-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "c5338b7b-07a7-4d69-a213-eabbd45c454e" (UID: "c5338b7b-07a7-4d69-a213-eabbd45c454e"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:45:03 crc kubenswrapper[4783]: I1002 11:45:03.484257 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c5338b7b-07a7-4d69-a213-eabbd45c454e-kube-api-access-bpn4z" (OuterVolumeSpecName: "kube-api-access-bpn4z") pod "c5338b7b-07a7-4d69-a213-eabbd45c454e" (UID: "c5338b7b-07a7-4d69-a213-eabbd45c454e"). InnerVolumeSpecName "kube-api-access-bpn4z". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 02 11:45:03 crc kubenswrapper[4783]: I1002 11:45:03.579128 4783 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/c5338b7b-07a7-4d69-a213-eabbd45c454e-config-volume\") on node \"crc\" DevicePath \"\""
Oct 02 11:45:03 crc kubenswrapper[4783]: I1002 11:45:03.579169 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bpn4z\" (UniqueName: \"kubernetes.io/projected/c5338b7b-07a7-4d69-a213-eabbd45c454e-kube-api-access-bpn4z\") on node \"crc\" DevicePath \"\""
Oct 02 11:45:03 crc kubenswrapper[4783]: I1002 11:45:03.579186 4783 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/c5338b7b-07a7-4d69-a213-eabbd45c454e-secret-volume\") on node \"crc\" DevicePath \"\""
Oct 02 11:45:04 crc kubenswrapper[4783]: I1002 11:45:04.089314 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29323425-5pd8k" event={"ID":"c5338b7b-07a7-4d69-a213-eabbd45c454e","Type":"ContainerDied","Data":"91019152b2766ffebd7060777b7a9430ae000e2af9dbd1dc11a1c6fea8fe9cde"}
Oct 02 11:45:04 crc kubenswrapper[4783]: I1002 11:45:04.089356 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29323425-5pd8k"
Oct 02 11:45:04 crc kubenswrapper[4783]: I1002 11:45:04.089363 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="91019152b2766ffebd7060777b7a9430ae000e2af9dbd1dc11a1c6fea8fe9cde"
Oct 02 11:45:04 crc kubenswrapper[4783]: I1002 11:45:04.488731 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29323380-ftb47"]
Oct 02 11:45:04 crc kubenswrapper[4783]: I1002 11:45:04.496099 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29323380-ftb47"]
Oct 02 11:45:05 crc kubenswrapper[4783]: I1002 11:45:05.545592 4783 scope.go:117] "RemoveContainer" containerID="16e46e78a08cfb5a1610f0efb729bfc7f610a79babf01966444bacf916b3acb9"
Oct 02 11:45:05 crc kubenswrapper[4783]: E1002 11:45:05.546185 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 11:45:05 crc kubenswrapper[4783]: I1002 11:45:05.562373 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1c1eb0a7-8e6f-4c80-a838-9058696e2591" path="/var/lib/kubelet/pods/1c1eb0a7-8e6f-4c80-a838-9058696e2591/volumes"
Oct 02 11:45:20 crc kubenswrapper[4783]: I1002 11:45:20.546097 4783 scope.go:117] "RemoveContainer" containerID="16e46e78a08cfb5a1610f0efb729bfc7f610a79babf01966444bacf916b3acb9"
Oct 02 11:45:20 crc kubenswrapper[4783]: E1002 11:45:20.546856 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 11:45:28 crc kubenswrapper[4783]: I1002 11:45:28.302655 4783 generic.go:334] "Generic (PLEG): container finished" podID="fa70f2b8-5bce-4ebe-a067-66d7dd57e787" containerID="598e05f18695be5ea6f3b567f2b9b1d3e45a74e6b50d400b5b63c60444d4078b" exitCode=2
Oct 02 11:45:28 crc kubenswrapper[4783]: I1002 11:45:28.303191 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-v8t7p" event={"ID":"fa70f2b8-5bce-4ebe-a067-66d7dd57e787","Type":"ContainerDied","Data":"598e05f18695be5ea6f3b567f2b9b1d3e45a74e6b50d400b5b63c60444d4078b"}
Oct 02 11:45:29 crc kubenswrapper[4783]: I1002 11:45:29.727997 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-v8t7p"
Oct 02 11:45:29 crc kubenswrapper[4783]: I1002 11:45:29.792755 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-ssh-key\") pod \"fa70f2b8-5bce-4ebe-a067-66d7dd57e787\" (UID: \"fa70f2b8-5bce-4ebe-a067-66d7dd57e787\") "
Oct 02 11:45:29 crc kubenswrapper[4783]: I1002 11:45:29.792822 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-nova-extra-config-0\") pod \"fa70f2b8-5bce-4ebe-a067-66d7dd57e787\" (UID: \"fa70f2b8-5bce-4ebe-a067-66d7dd57e787\") "
Oct 02 11:45:29 crc kubenswrapper[4783]: I1002 11:45:29.792839 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-nova-migration-ssh-key-0\") pod \"fa70f2b8-5bce-4ebe-a067-66d7dd57e787\" (UID: \"fa70f2b8-5bce-4ebe-a067-66d7dd57e787\") "
Oct 02 11:45:29 crc kubenswrapper[4783]: I1002 11:45:29.792892 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6pp6s\" (UniqueName: \"kubernetes.io/projected/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-kube-api-access-6pp6s\") pod \"fa70f2b8-5bce-4ebe-a067-66d7dd57e787\" (UID: \"fa70f2b8-5bce-4ebe-a067-66d7dd57e787\") "
Oct 02 11:45:29 crc kubenswrapper[4783]: I1002 11:45:29.792944 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-nova-migration-ssh-key-1\") pod \"fa70f2b8-5bce-4ebe-a067-66d7dd57e787\" (UID: \"fa70f2b8-5bce-4ebe-a067-66d7dd57e787\") "
Oct 02 11:45:29 crc kubenswrapper[4783]: I1002 11:45:29.792979 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-nova-cell1-compute-config-0\") pod \"fa70f2b8-5bce-4ebe-a067-66d7dd57e787\" (UID: \"fa70f2b8-5bce-4ebe-a067-66d7dd57e787\") "
Oct 02 11:45:29 crc kubenswrapper[4783]: I1002 11:45:29.792997 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-nova-cell1-compute-config-1\") pod \"fa70f2b8-5bce-4ebe-a067-66d7dd57e787\" (UID: \"fa70f2b8-5bce-4ebe-a067-66d7dd57e787\") "
Oct 02 11:45:29 crc kubenswrapper[4783]: I1002 11:45:29.793045 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-inventory\") pod \"fa70f2b8-5bce-4ebe-a067-66d7dd57e787\" (UID: \"fa70f2b8-5bce-4ebe-a067-66d7dd57e787\") "
Oct 02 11:45:29 crc kubenswrapper[4783]: I1002 11:45:29.793887 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-nova-combined-ca-bundle\") pod \"fa70f2b8-5bce-4ebe-a067-66d7dd57e787\" (UID: \"fa70f2b8-5bce-4ebe-a067-66d7dd57e787\") "
Oct 02 11:45:29 crc kubenswrapper[4783]: I1002 11:45:29.799023 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "fa70f2b8-5bce-4ebe-a067-66d7dd57e787" (UID: "fa70f2b8-5bce-4ebe-a067-66d7dd57e787"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:45:29 crc kubenswrapper[4783]: I1002 11:45:29.818270 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-kube-api-access-6pp6s" (OuterVolumeSpecName: "kube-api-access-6pp6s") pod "fa70f2b8-5bce-4ebe-a067-66d7dd57e787" (UID: "fa70f2b8-5bce-4ebe-a067-66d7dd57e787"). InnerVolumeSpecName "kube-api-access-6pp6s". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 02 11:45:29 crc kubenswrapper[4783]: I1002 11:45:29.831248 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "fa70f2b8-5bce-4ebe-a067-66d7dd57e787" (UID: "fa70f2b8-5bce-4ebe-a067-66d7dd57e787"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:45:29 crc kubenswrapper[4783]: I1002 11:45:29.831293 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "fa70f2b8-5bce-4ebe-a067-66d7dd57e787" (UID: "fa70f2b8-5bce-4ebe-a067-66d7dd57e787"). InnerVolumeSpecName "nova-migration-ssh-key-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:45:29 crc kubenswrapper[4783]: I1002 11:45:29.835045 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-nova-extra-config-0" (OuterVolumeSpecName: "nova-extra-config-0") pod "fa70f2b8-5bce-4ebe-a067-66d7dd57e787" (UID: "fa70f2b8-5bce-4ebe-a067-66d7dd57e787"). InnerVolumeSpecName "nova-extra-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Oct 02 11:45:29 crc kubenswrapper[4783]: I1002 11:45:29.837534 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "fa70f2b8-5bce-4ebe-a067-66d7dd57e787" (UID: "fa70f2b8-5bce-4ebe-a067-66d7dd57e787"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:45:29 crc kubenswrapper[4783]: I1002 11:45:29.841983 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "fa70f2b8-5bce-4ebe-a067-66d7dd57e787" (UID: "fa70f2b8-5bce-4ebe-a067-66d7dd57e787"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:45:29 crc kubenswrapper[4783]: I1002 11:45:29.850058 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "fa70f2b8-5bce-4ebe-a067-66d7dd57e787" (UID: "fa70f2b8-5bce-4ebe-a067-66d7dd57e787"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:45:29 crc kubenswrapper[4783]: I1002 11:45:29.856577 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-inventory" (OuterVolumeSpecName: "inventory") pod "fa70f2b8-5bce-4ebe-a067-66d7dd57e787" (UID: "fa70f2b8-5bce-4ebe-a067-66d7dd57e787"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue ""
Oct 02 11:45:29 crc kubenswrapper[4783]: I1002 11:45:29.897025 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6pp6s\" (UniqueName: \"kubernetes.io/projected/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-kube-api-access-6pp6s\") on node \"crc\" DevicePath \"\""
Oct 02 11:45:29 crc kubenswrapper[4783]: I1002 11:45:29.897111 4783 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\""
Oct 02 11:45:29 crc kubenswrapper[4783]: I1002 11:45:29.897142 4783 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\""
Oct 02 11:45:29 crc kubenswrapper[4783]: I1002 11:45:29.897169 4783 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\""
Oct 02 11:45:29 crc kubenswrapper[4783]: I1002 11:45:29.897199 4783 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-inventory\") on node \"crc\" DevicePath \"\""
Oct 02 11:45:29 crc kubenswrapper[4783]: I1002 11:45:29.897225 4783 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Oct 02 11:45:29 crc kubenswrapper[4783]: I1002 11:45:29.897249 4783 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-ssh-key\") on node \"crc\" DevicePath \"\""
Oct 02 11:45:29 crc kubenswrapper[4783]: I1002 11:45:29.897274 4783 reconciler_common.go:293] "Volume detached for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-nova-extra-config-0\") on node \"crc\" DevicePath \"\""
Oct 02 11:45:29 crc kubenswrapper[4783]: I1002 11:45:29.897296 4783 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/fa70f2b8-5bce-4ebe-a067-66d7dd57e787-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\""
Oct 02 11:45:30 crc kubenswrapper[4783]: I1002 11:45:30.327009 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-v8t7p" event={"ID":"fa70f2b8-5bce-4ebe-a067-66d7dd57e787","Type":"ContainerDied","Data":"de31000724d7bd1f4df91adea5547adca3fec526b10b1887e0ccfc8156add284"}
Oct 02 11:45:30 crc kubenswrapper[4783]: I1002 11:45:30.327054 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="de31000724d7bd1f4df91adea5547adca3fec526b10b1887e0ccfc8156add284"
Oct 02 11:45:30 crc kubenswrapper[4783]: I1002 11:45:30.327074 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-v8t7p"
Oct 02 11:45:34 crc kubenswrapper[4783]: I1002 11:45:34.545560 4783 scope.go:117] "RemoveContainer" containerID="16e46e78a08cfb5a1610f0efb729bfc7f610a79babf01966444bacf916b3acb9"
Oct 02 11:45:34 crc kubenswrapper[4783]: E1002 11:45:34.546155 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 11:45:48 crc kubenswrapper[4783]: I1002 11:45:48.545294 4783 scope.go:117] "RemoveContainer" containerID="16e46e78a08cfb5a1610f0efb729bfc7f610a79babf01966444bacf916b3acb9"
Oct 02 11:45:48 crc kubenswrapper[4783]: E1002 11:45:48.546360 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 11:45:56 crc kubenswrapper[4783]: I1002 11:45:56.505697 4783 scope.go:117] "RemoveContainer" containerID="3a56660b14fac27f885845542ee4a326f5664eedb3c47c6bbf9317abe972d6eb"
Oct 02 11:46:00 crc kubenswrapper[4783]: I1002 11:46:00.545545 4783 scope.go:117] "RemoveContainer" containerID="16e46e78a08cfb5a1610f0efb729bfc7f610a79babf01966444bacf916b3acb9"
Oct 02 11:46:00 crc kubenswrapper[4783]: E1002 11:46:00.546082 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 11:46:07 crc kubenswrapper[4783]: I1002 11:46:07.032427 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-pxxzd"]
Oct 02 11:46:07 crc kubenswrapper[4783]: E1002 11:46:07.033560 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c5338b7b-07a7-4d69-a213-eabbd45c454e" containerName="collect-profiles"
Oct 02 11:46:07 crc kubenswrapper[4783]: I1002 11:46:07.033590 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="c5338b7b-07a7-4d69-a213-eabbd45c454e" containerName="collect-profiles"
Oct 02 11:46:07 crc kubenswrapper[4783]: E1002 11:46:07.033634 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fa70f2b8-5bce-4ebe-a067-66d7dd57e787" containerName="nova-edpm-deployment-openstack-edpm-ipam"
Oct 02 11:46:07 crc kubenswrapper[4783]: I1002 11:46:07.033644 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="fa70f2b8-5bce-4ebe-a067-66d7dd57e787" containerName="nova-edpm-deployment-openstack-edpm-ipam"
Oct 02 11:46:07 crc kubenswrapper[4783]: I1002 11:46:07.033920 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="c5338b7b-07a7-4d69-a213-eabbd45c454e" containerName="collect-profiles"
Oct 02 11:46:07 crc kubenswrapper[4783]: I1002 11:46:07.033950 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="fa70f2b8-5bce-4ebe-a067-66d7dd57e787" containerName="nova-edpm-deployment-openstack-edpm-ipam"
Oct 02 11:46:07 crc kubenswrapper[4783]: I1002 11:46:07.035095 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pxxzd"
Oct 02 11:46:07 crc kubenswrapper[4783]: I1002 11:46:07.037124 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config"
Oct 02 11:46:07 crc kubenswrapper[4783]: I1002 11:46:07.037211 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam"
Oct 02 11:46:07 crc kubenswrapper[4783]: I1002 11:46:07.037326 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret"
Oct 02 11:46:07 crc kubenswrapper[4783]: I1002 11:46:07.037550 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-n5lmz"
Oct 02 11:46:07 crc kubenswrapper[4783]: I1002 11:46:07.038152 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key"
Oct 02 11:46:07 crc kubenswrapper[4783]: I1002 11:46:07.039001 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env"
Oct 02 11:46:07 crc kubenswrapper[4783]: I1002 11:46:07.039804 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-extra-config"
Oct 02 11:46:07 crc kubenswrapper[4783]: I1002 11:46:07.050118 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-pxxzd"]
Oct 02 11:46:07 crc kubenswrapper[4783]: I1002 11:46:07.108289 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/ad8fdf40-da4c-42c2-a8cd-01675807c93c-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pxxzd\" (UID: \"ad8fdf40-da4c-42c2-a8cd-01675807c93c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pxxzd"
Oct 02 11:46:07 crc kubenswrapper[4783]: I1002 11:46:07.108341 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/ad8fdf40-da4c-42c2-a8cd-01675807c93c-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pxxzd\" (UID: \"ad8fdf40-da4c-42c2-a8cd-01675807c93c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pxxzd"
Oct 02 11:46:07 crc kubenswrapper[4783]: I1002 11:46:07.108614 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/ad8fdf40-da4c-42c2-a8cd-01675807c93c-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pxxzd\" (UID: \"ad8fdf40-da4c-42c2-a8cd-01675807c93c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pxxzd"
Oct 02 11:46:07 crc kubenswrapper[4783]: I1002 11:46:07.108710 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qblql\" (UniqueName: \"kubernetes.io/projected/ad8fdf40-da4c-42c2-a8cd-01675807c93c-kube-api-access-qblql\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pxxzd\" (UID: \"ad8fdf40-da4c-42c2-a8cd-01675807c93c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pxxzd"
Oct 02 11:46:07 crc kubenswrapper[4783]: I1002 11:46:07.108739 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ad8fdf40-da4c-42c2-a8cd-01675807c93c-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pxxzd\" (UID: \"ad8fdf40-da4c-42c2-a8cd-01675807c93c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pxxzd"
Oct 02 11:46:07 crc kubenswrapper[4783]: I1002 11:46:07.108784 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ad8fdf40-da4c-42c2-a8cd-01675807c93c-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pxxzd\" (UID: \"ad8fdf40-da4c-42c2-a8cd-01675807c93c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pxxzd"
Oct 02 11:46:07 crc kubenswrapper[4783]: I1002 11:46:07.108847 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/ad8fdf40-da4c-42c2-a8cd-01675807c93c-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pxxzd\" (UID: \"ad8fdf40-da4c-42c2-a8cd-01675807c93c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pxxzd"
Oct 02 11:46:07 crc kubenswrapper[4783]: I1002 11:46:07.108882 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/ad8fdf40-da4c-42c2-a8cd-01675807c93c-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pxxzd\" (UID: \"ad8fdf40-da4c-42c2-a8cd-01675807c93c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pxxzd"
Oct 02 11:46:07 crc kubenswrapper[4783]: I1002 11:46:07.109792 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad8fdf40-da4c-42c2-a8cd-01675807c93c-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pxxzd\" (UID: \"ad8fdf40-da4c-42c2-a8cd-01675807c93c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pxxzd"
Oct 02 11:46:07 crc kubenswrapper[4783]: I1002 11:46:07.211309 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qblql\" (UniqueName: \"kubernetes.io/projected/ad8fdf40-da4c-42c2-a8cd-01675807c93c-kube-api-access-qblql\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pxxzd\" (UID: \"ad8fdf40-da4c-42c2-a8cd-01675807c93c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pxxzd"
Oct 02 11:46:07 crc kubenswrapper[4783]: I1002 11:46:07.211351 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ad8fdf40-da4c-42c2-a8cd-01675807c93c-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pxxzd\" (UID: \"ad8fdf40-da4c-42c2-a8cd-01675807c93c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pxxzd"
Oct 02 11:46:07 crc kubenswrapper[4783]: I1002 11:46:07.211379 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ad8fdf40-da4c-42c2-a8cd-01675807c93c-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pxxzd\" (UID: \"ad8fdf40-da4c-42c2-a8cd-01675807c93c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pxxzd"
Oct 02 11:46:07 crc kubenswrapper[4783]: I1002 11:46:07.211399 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/ad8fdf40-da4c-42c2-a8cd-01675807c93c-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pxxzd\" (UID: \"ad8fdf40-da4c-42c2-a8cd-01675807c93c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pxxzd"
Oct 02 11:46:07 crc kubenswrapper[4783]: I1002 11:46:07.211419 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/ad8fdf40-da4c-42c2-a8cd-01675807c93c-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pxxzd\" (UID: \"ad8fdf40-da4c-42c2-a8cd-01675807c93c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pxxzd"
Oct 02 11:46:07 crc kubenswrapper[4783]: I1002 11:46:07.211530 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad8fdf40-da4c-42c2-a8cd-01675807c93c-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pxxzd\" (UID: \"ad8fdf40-da4c-42c2-a8cd-01675807c93c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pxxzd"
Oct 02 11:46:07 crc kubenswrapper[4783]: I1002 11:46:07.211574 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/ad8fdf40-da4c-42c2-a8cd-01675807c93c-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pxxzd\" (UID: \"ad8fdf40-da4c-42c2-a8cd-01675807c93c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pxxzd"
Oct 02 11:46:07 crc kubenswrapper[4783]: I1002 11:46:07.211604 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/ad8fdf40-da4c-42c2-a8cd-01675807c93c-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pxxzd\" (UID: \"ad8fdf40-da4c-42c2-a8cd-01675807c93c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pxxzd"
Oct 02 11:46:07 crc kubenswrapper[4783]: I1002 11:46:07.211675 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName:
\"kubernetes.io/secret/ad8fdf40-da4c-42c2-a8cd-01675807c93c-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pxxzd\" (UID: \"ad8fdf40-da4c-42c2-a8cd-01675807c93c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pxxzd" Oct 02 11:46:07 crc kubenswrapper[4783]: I1002 11:46:07.213113 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/ad8fdf40-da4c-42c2-a8cd-01675807c93c-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pxxzd\" (UID: \"ad8fdf40-da4c-42c2-a8cd-01675807c93c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pxxzd" Oct 02 11:46:07 crc kubenswrapper[4783]: I1002 11:46:07.217587 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/ad8fdf40-da4c-42c2-a8cd-01675807c93c-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pxxzd\" (UID: \"ad8fdf40-da4c-42c2-a8cd-01675807c93c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pxxzd" Oct 02 11:46:07 crc kubenswrapper[4783]: I1002 11:46:07.218051 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/ad8fdf40-da4c-42c2-a8cd-01675807c93c-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pxxzd\" (UID: \"ad8fdf40-da4c-42c2-a8cd-01675807c93c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pxxzd" Oct 02 11:46:07 crc kubenswrapper[4783]: I1002 11:46:07.218230 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad8fdf40-da4c-42c2-a8cd-01675807c93c-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pxxzd\" (UID: \"ad8fdf40-da4c-42c2-a8cd-01675807c93c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pxxzd" Oct 02 11:46:07 crc kubenswrapper[4783]: I1002 11:46:07.218619 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/ad8fdf40-da4c-42c2-a8cd-01675807c93c-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pxxzd\" (UID: \"ad8fdf40-da4c-42c2-a8cd-01675807c93c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pxxzd" Oct 02 11:46:07 crc kubenswrapper[4783]: I1002 11:46:07.219703 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ad8fdf40-da4c-42c2-a8cd-01675807c93c-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pxxzd\" (UID: \"ad8fdf40-da4c-42c2-a8cd-01675807c93c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pxxzd" Oct 02 11:46:07 crc kubenswrapper[4783]: I1002 11:46:07.219774 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/ad8fdf40-da4c-42c2-a8cd-01675807c93c-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pxxzd\" (UID: \"ad8fdf40-da4c-42c2-a8cd-01675807c93c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pxxzd" Oct 02 11:46:07 crc kubenswrapper[4783]: I1002 11:46:07.220336 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ad8fdf40-da4c-42c2-a8cd-01675807c93c-ssh-key\") pod 
\"nova-edpm-deployment-openstack-edpm-ipam-pxxzd\" (UID: \"ad8fdf40-da4c-42c2-a8cd-01675807c93c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pxxzd" Oct 02 11:46:07 crc kubenswrapper[4783]: I1002 11:46:07.229117 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qblql\" (UniqueName: \"kubernetes.io/projected/ad8fdf40-da4c-42c2-a8cd-01675807c93c-kube-api-access-qblql\") pod \"nova-edpm-deployment-openstack-edpm-ipam-pxxzd\" (UID: \"ad8fdf40-da4c-42c2-a8cd-01675807c93c\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pxxzd" Oct 02 11:46:07 crc kubenswrapper[4783]: I1002 11:46:07.359252 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pxxzd" Oct 02 11:46:07 crc kubenswrapper[4783]: I1002 11:46:07.895005 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-pxxzd"] Oct 02 11:46:08 crc kubenswrapper[4783]: I1002 11:46:08.677205 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pxxzd" event={"ID":"ad8fdf40-da4c-42c2-a8cd-01675807c93c","Type":"ContainerStarted","Data":"9d74f542cb3482edddf2ca618861dc5af3d1de7fe6da614e95643c4ca5c24026"} Oct 02 11:46:08 crc kubenswrapper[4783]: I1002 11:46:08.677804 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pxxzd" event={"ID":"ad8fdf40-da4c-42c2-a8cd-01675807c93c","Type":"ContainerStarted","Data":"252d2d3cdaaa49667cb3dacb77ce0547b103dcf9f9b296ee27dbe2fa1f96d7a4"} Oct 02 11:46:11 crc kubenswrapper[4783]: I1002 11:46:11.544733 4783 scope.go:117] "RemoveContainer" containerID="16e46e78a08cfb5a1610f0efb729bfc7f610a79babf01966444bacf916b3acb9" Oct 02 11:46:11 crc kubenswrapper[4783]: E1002 11:46:11.544960 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:46:22 crc kubenswrapper[4783]: I1002 11:46:22.546148 4783 scope.go:117] "RemoveContainer" containerID="16e46e78a08cfb5a1610f0efb729bfc7f610a79babf01966444bacf916b3acb9" Oct 02 11:46:22 crc kubenswrapper[4783]: E1002 11:46:22.546949 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:46:36 crc kubenswrapper[4783]: I1002 11:46:36.544997 4783 scope.go:117] "RemoveContainer" containerID="16e46e78a08cfb5a1610f0efb729bfc7f610a79babf01966444bacf916b3acb9" Oct 02 11:46:36 crc kubenswrapper[4783]: E1002 11:46:36.545878 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:46:48 crc kubenswrapper[4783]: I1002 11:46:48.545075 4783 scope.go:117] "RemoveContainer" containerID="16e46e78a08cfb5a1610f0efb729bfc7f610a79babf01966444bacf916b3acb9" Oct 02 11:46:48 crc kubenswrapper[4783]: E1002 11:46:48.546031 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:47:00 crc kubenswrapper[4783]: I1002 11:47:00.546485 4783 scope.go:117] "RemoveContainer" containerID="16e46e78a08cfb5a1610f0efb729bfc7f610a79babf01966444bacf916b3acb9" Oct 02 11:47:01 crc kubenswrapper[4783]: I1002 11:47:01.122215 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" event={"ID":"3288cc82-59a8-408e-8b0e-b5255882b4fb","Type":"ContainerStarted","Data":"d3853f8a29225709d658ac8d5245ea2a3fb7299d549e11cab89a0fd96341336e"} Oct 02 11:47:01 crc kubenswrapper[4783]: I1002 11:47:01.143707 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pxxzd" podStartSLOduration=53.96320483 podStartE2EDuration="54.143687248s" podCreationTimestamp="2025-10-02 11:46:07 +0000 UTC" firstStartedPulling="2025-10-02 11:46:07.900178738 +0000 UTC m=+3201.216373009" lastFinishedPulling="2025-10-02 11:46:08.080661166 +0000 UTC m=+3201.396855427" observedRunningTime="2025-10-02 11:46:08.700895277 +0000 UTC m=+3202.017089538" watchObservedRunningTime="2025-10-02 11:47:01.143687248 +0000 UTC m=+3254.459881509" Oct 02 11:47:20 crc kubenswrapper[4783]: I1002 11:47:20.303081 4783 generic.go:334] "Generic (PLEG): container finished" podID="ad8fdf40-da4c-42c2-a8cd-01675807c93c" containerID="9d74f542cb3482edddf2ca618861dc5af3d1de7fe6da614e95643c4ca5c24026" exitCode=2 Oct 02 11:47:20 crc kubenswrapper[4783]: I1002 11:47:20.303177 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pxxzd" event={"ID":"ad8fdf40-da4c-42c2-a8cd-01675807c93c","Type":"ContainerDied","Data":"9d74f542cb3482edddf2ca618861dc5af3d1de7fe6da614e95643c4ca5c24026"} Oct 02 11:47:21 crc kubenswrapper[4783]: I1002 11:47:21.696674 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pxxzd" Oct 02 11:47:21 crc kubenswrapper[4783]: I1002 11:47:21.797030 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ad8fdf40-da4c-42c2-a8cd-01675807c93c-ssh-key\") pod \"ad8fdf40-da4c-42c2-a8cd-01675807c93c\" (UID: \"ad8fdf40-da4c-42c2-a8cd-01675807c93c\") " Oct 02 11:47:21 crc kubenswrapper[4783]: I1002 11:47:21.797150 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/ad8fdf40-da4c-42c2-a8cd-01675807c93c-nova-migration-ssh-key-0\") pod \"ad8fdf40-da4c-42c2-a8cd-01675807c93c\" (UID: \"ad8fdf40-da4c-42c2-a8cd-01675807c93c\") " Oct 02 11:47:21 crc kubenswrapper[4783]: I1002 11:47:21.797293 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/ad8fdf40-da4c-42c2-a8cd-01675807c93c-nova-extra-config-0\") pod \"ad8fdf40-da4c-42c2-a8cd-01675807c93c\" (UID: \"ad8fdf40-da4c-42c2-a8cd-01675807c93c\") " Oct 02 11:47:21 crc kubenswrapper[4783]: I1002 11:47:21.798164 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad8fdf40-da4c-42c2-a8cd-01675807c93c-nova-combined-ca-bundle\") pod \"ad8fdf40-da4c-42c2-a8cd-01675807c93c\" (UID: \"ad8fdf40-da4c-42c2-a8cd-01675807c93c\") " Oct 02 11:47:21 crc kubenswrapper[4783]: I1002 11:47:21.798289 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/ad8fdf40-da4c-42c2-a8cd-01675807c93c-nova-migration-ssh-key-1\") pod \"ad8fdf40-da4c-42c2-a8cd-01675807c93c\" (UID: \"ad8fdf40-da4c-42c2-a8cd-01675807c93c\") " Oct 02 11:47:21 crc kubenswrapper[4783]: I1002 11:47:21.798493 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ad8fdf40-da4c-42c2-a8cd-01675807c93c-inventory\") pod \"ad8fdf40-da4c-42c2-a8cd-01675807c93c\" (UID: \"ad8fdf40-da4c-42c2-a8cd-01675807c93c\") " Oct 02 11:47:21 crc kubenswrapper[4783]: I1002 11:47:21.798627 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qblql\" (UniqueName: \"kubernetes.io/projected/ad8fdf40-da4c-42c2-a8cd-01675807c93c-kube-api-access-qblql\") pod \"ad8fdf40-da4c-42c2-a8cd-01675807c93c\" (UID: \"ad8fdf40-da4c-42c2-a8cd-01675807c93c\") " Oct 02 11:47:21 crc kubenswrapper[4783]: I1002 11:47:21.798743 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/ad8fdf40-da4c-42c2-a8cd-01675807c93c-nova-cell1-compute-config-1\") pod \"ad8fdf40-da4c-42c2-a8cd-01675807c93c\" (UID: \"ad8fdf40-da4c-42c2-a8cd-01675807c93c\") " Oct 02 11:47:21 crc kubenswrapper[4783]: I1002 11:47:21.799158 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/ad8fdf40-da4c-42c2-a8cd-01675807c93c-nova-cell1-compute-config-0\") pod \"ad8fdf40-da4c-42c2-a8cd-01675807c93c\" (UID: \"ad8fdf40-da4c-42c2-a8cd-01675807c93c\") " Oct 02 11:47:21 crc kubenswrapper[4783]: I1002 11:47:21.803744 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/ad8fdf40-da4c-42c2-a8cd-01675807c93c-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "ad8fdf40-da4c-42c2-a8cd-01675807c93c" (UID: "ad8fdf40-da4c-42c2-a8cd-01675807c93c"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:47:21 crc kubenswrapper[4783]: I1002 11:47:21.804168 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ad8fdf40-da4c-42c2-a8cd-01675807c93c-kube-api-access-qblql" (OuterVolumeSpecName: "kube-api-access-qblql") pod "ad8fdf40-da4c-42c2-a8cd-01675807c93c" (UID: "ad8fdf40-da4c-42c2-a8cd-01675807c93c"). InnerVolumeSpecName "kube-api-access-qblql". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:47:21 crc kubenswrapper[4783]: I1002 11:47:21.830786 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad8fdf40-da4c-42c2-a8cd-01675807c93c-inventory" (OuterVolumeSpecName: "inventory") pod "ad8fdf40-da4c-42c2-a8cd-01675807c93c" (UID: "ad8fdf40-da4c-42c2-a8cd-01675807c93c"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:47:21 crc kubenswrapper[4783]: I1002 11:47:21.832405 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad8fdf40-da4c-42c2-a8cd-01675807c93c-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "ad8fdf40-da4c-42c2-a8cd-01675807c93c" (UID: "ad8fdf40-da4c-42c2-a8cd-01675807c93c"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:47:21 crc kubenswrapper[4783]: I1002 11:47:21.842921 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ad8fdf40-da4c-42c2-a8cd-01675807c93c-nova-extra-config-0" (OuterVolumeSpecName: "nova-extra-config-0") pod "ad8fdf40-da4c-42c2-a8cd-01675807c93c" (UID: "ad8fdf40-da4c-42c2-a8cd-01675807c93c"). InnerVolumeSpecName "nova-extra-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:47:21 crc kubenswrapper[4783]: I1002 11:47:21.843606 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad8fdf40-da4c-42c2-a8cd-01675807c93c-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "ad8fdf40-da4c-42c2-a8cd-01675807c93c" (UID: "ad8fdf40-da4c-42c2-a8cd-01675807c93c"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:47:21 crc kubenswrapper[4783]: I1002 11:47:21.846593 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad8fdf40-da4c-42c2-a8cd-01675807c93c-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "ad8fdf40-da4c-42c2-a8cd-01675807c93c" (UID: "ad8fdf40-da4c-42c2-a8cd-01675807c93c"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:47:21 crc kubenswrapper[4783]: I1002 11:47:21.849528 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad8fdf40-da4c-42c2-a8cd-01675807c93c-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "ad8fdf40-da4c-42c2-a8cd-01675807c93c" (UID: "ad8fdf40-da4c-42c2-a8cd-01675807c93c"). InnerVolumeSpecName "nova-cell1-compute-config-1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:47:21 crc kubenswrapper[4783]: I1002 11:47:21.862257 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad8fdf40-da4c-42c2-a8cd-01675807c93c-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "ad8fdf40-da4c-42c2-a8cd-01675807c93c" (UID: "ad8fdf40-da4c-42c2-a8cd-01675807c93c"). InnerVolumeSpecName "nova-migration-ssh-key-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:47:21 crc kubenswrapper[4783]: I1002 11:47:21.902353 4783 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/ad8fdf40-da4c-42c2-a8cd-01675807c93c-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Oct 02 11:47:21 crc kubenswrapper[4783]: I1002 11:47:21.902400 4783 reconciler_common.go:293] "Volume detached for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/ad8fdf40-da4c-42c2-a8cd-01675807c93c-nova-extra-config-0\") on node \"crc\" DevicePath \"\"" Oct 02 11:47:21 crc kubenswrapper[4783]: I1002 11:47:21.902430 4783 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad8fdf40-da4c-42c2-a8cd-01675807c93c-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 11:47:21 crc kubenswrapper[4783]: I1002 11:47:21.902442 4783 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/ad8fdf40-da4c-42c2-a8cd-01675807c93c-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Oct 02 11:47:21 crc kubenswrapper[4783]: I1002 11:47:21.902456 4783 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ad8fdf40-da4c-42c2-a8cd-01675807c93c-inventory\") on node \"crc\" DevicePath \"\"" Oct 02 11:47:21 crc kubenswrapper[4783]: I1002 11:47:21.902466 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qblql\" (UniqueName: \"kubernetes.io/projected/ad8fdf40-da4c-42c2-a8cd-01675807c93c-kube-api-access-qblql\") on node \"crc\" DevicePath \"\"" Oct 02 11:47:21 crc kubenswrapper[4783]: I1002 11:47:21.902477 4783 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/ad8fdf40-da4c-42c2-a8cd-01675807c93c-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Oct 02 11:47:21 crc kubenswrapper[4783]: I1002 11:47:21.902489 4783 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/ad8fdf40-da4c-42c2-a8cd-01675807c93c-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Oct 02 11:47:21 crc kubenswrapper[4783]: I1002 11:47:21.902501 4783 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ad8fdf40-da4c-42c2-a8cd-01675807c93c-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 02 11:47:22 crc kubenswrapper[4783]: I1002 11:47:22.331395 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pxxzd" event={"ID":"ad8fdf40-da4c-42c2-a8cd-01675807c93c","Type":"ContainerDied","Data":"252d2d3cdaaa49667cb3dacb77ce0547b103dcf9f9b296ee27dbe2fa1f96d7a4"} Oct 02 11:47:22 crc kubenswrapper[4783]: I1002 11:47:22.331469 4783 pod_container_deletor.go:80] "Container not found in pod's containers" 
containerID="252d2d3cdaaa49667cb3dacb77ce0547b103dcf9f9b296ee27dbe2fa1f96d7a4" Oct 02 11:47:22 crc kubenswrapper[4783]: I1002 11:47:22.331539 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-pxxzd" Oct 02 11:48:39 crc kubenswrapper[4783]: I1002 11:48:39.029455 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-h695m"] Oct 02 11:48:39 crc kubenswrapper[4783]: E1002 11:48:39.031722 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad8fdf40-da4c-42c2-a8cd-01675807c93c" containerName="nova-edpm-deployment-openstack-edpm-ipam" Oct 02 11:48:39 crc kubenswrapper[4783]: I1002 11:48:39.031823 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad8fdf40-da4c-42c2-a8cd-01675807c93c" containerName="nova-edpm-deployment-openstack-edpm-ipam" Oct 02 11:48:39 crc kubenswrapper[4783]: I1002 11:48:39.032185 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad8fdf40-da4c-42c2-a8cd-01675807c93c" containerName="nova-edpm-deployment-openstack-edpm-ipam" Oct 02 11:48:39 crc kubenswrapper[4783]: I1002 11:48:39.033133 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-h695m" Oct 02 11:48:39 crc kubenswrapper[4783]: I1002 11:48:39.036525 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-n5lmz" Oct 02 11:48:39 crc kubenswrapper[4783]: I1002 11:48:39.036762 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 02 11:48:39 crc kubenswrapper[4783]: I1002 11:48:39.040854 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 02 11:48:39 crc kubenswrapper[4783]: I1002 11:48:39.041119 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key" Oct 02 11:48:39 crc kubenswrapper[4783]: I1002 11:48:39.041124 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-extra-config" Oct 02 11:48:39 crc kubenswrapper[4783]: I1002 11:48:39.041191 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config" Oct 02 11:48:39 crc kubenswrapper[4783]: I1002 11:48:39.044890 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-h695m"] Oct 02 11:48:39 crc kubenswrapper[4783]: I1002 11:48:39.049892 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 02 11:48:39 crc kubenswrapper[4783]: I1002 11:48:39.127367 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-h695m\" (UID: \"2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-h695m" Oct 02 11:48:39 crc kubenswrapper[4783]: I1002 11:48:39.127640 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-nova-cell1-compute-config-0\") pod 
\"nova-edpm-deployment-openstack-edpm-ipam-h695m\" (UID: \"2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-h695m" Oct 02 11:48:39 crc kubenswrapper[4783]: I1002 11:48:39.127846 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-h695m\" (UID: \"2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-h695m" Oct 02 11:48:39 crc kubenswrapper[4783]: I1002 11:48:39.127912 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-h695m\" (UID: \"2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-h695m" Oct 02 11:48:39 crc kubenswrapper[4783]: I1002 11:48:39.128012 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-h695m\" (UID: \"2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-h695m" Oct 02 11:48:39 crc kubenswrapper[4783]: I1002 11:48:39.128259 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-h695m\" (UID: \"2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-h695m" Oct 02 11:48:39 crc kubenswrapper[4783]: I1002 11:48:39.128298 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-h695m\" (UID: \"2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-h695m" Oct 02 11:48:39 crc kubenswrapper[4783]: I1002 11:48:39.128541 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gcwz8\" (UniqueName: \"kubernetes.io/projected/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-kube-api-access-gcwz8\") pod \"nova-edpm-deployment-openstack-edpm-ipam-h695m\" (UID: \"2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-h695m" Oct 02 11:48:39 crc kubenswrapper[4783]: I1002 11:48:39.128626 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-h695m\" (UID: \"2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-h695m" Oct 02 11:48:39 crc kubenswrapper[4783]: I1002 11:48:39.230137 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: 
\"kubernetes.io/secret/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-h695m\" (UID: \"2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-h695m" Oct 02 11:48:39 crc kubenswrapper[4783]: I1002 11:48:39.230199 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-h695m\" (UID: \"2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-h695m" Oct 02 11:48:39 crc kubenswrapper[4783]: I1002 11:48:39.230247 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-h695m\" (UID: \"2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-h695m" Oct 02 11:48:39 crc kubenswrapper[4783]: I1002 11:48:39.230273 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-h695m\" (UID: \"2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-h695m" Oct 02 11:48:39 crc kubenswrapper[4783]: I1002 11:48:39.230301 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-h695m\" (UID: \"2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-h695m" Oct 02 11:48:39 crc kubenswrapper[4783]: I1002 11:48:39.230391 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-h695m\" (UID: \"2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-h695m" Oct 02 11:48:39 crc kubenswrapper[4783]: I1002 11:48:39.230445 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-h695m\" (UID: \"2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-h695m" Oct 02 11:48:39 crc kubenswrapper[4783]: I1002 11:48:39.230530 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gcwz8\" (UniqueName: \"kubernetes.io/projected/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-kube-api-access-gcwz8\") pod \"nova-edpm-deployment-openstack-edpm-ipam-h695m\" (UID: \"2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-h695m" Oct 02 11:48:39 crc kubenswrapper[4783]: I1002 11:48:39.230568 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: 
\"kubernetes.io/secret/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-h695m\" (UID: \"2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-h695m" Oct 02 11:48:39 crc kubenswrapper[4783]: I1002 11:48:39.232108 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-h695m\" (UID: \"2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-h695m" Oct 02 11:48:39 crc kubenswrapper[4783]: I1002 11:48:39.236435 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-h695m\" (UID: \"2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-h695m" Oct 02 11:48:39 crc kubenswrapper[4783]: I1002 11:48:39.236747 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-h695m\" (UID: \"2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-h695m" Oct 02 11:48:39 crc kubenswrapper[4783]: I1002 11:48:39.236748 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-h695m\" (UID: \"2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-h695m" Oct 02 11:48:39 crc kubenswrapper[4783]: I1002 11:48:39.239280 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-h695m\" (UID: \"2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-h695m" Oct 02 11:48:39 crc kubenswrapper[4783]: I1002 11:48:39.239781 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-h695m\" (UID: \"2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-h695m" Oct 02 11:48:39 crc kubenswrapper[4783]: I1002 11:48:39.240345 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-h695m\" (UID: \"2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-h695m" Oct 02 11:48:39 crc kubenswrapper[4783]: I1002 11:48:39.246322 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-nova-migration-ssh-key-1\") pod 
\"nova-edpm-deployment-openstack-edpm-ipam-h695m\" (UID: \"2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-h695m" Oct 02 11:48:39 crc kubenswrapper[4783]: I1002 11:48:39.250262 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gcwz8\" (UniqueName: \"kubernetes.io/projected/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-kube-api-access-gcwz8\") pod \"nova-edpm-deployment-openstack-edpm-ipam-h695m\" (UID: \"2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-h695m" Oct 02 11:48:39 crc kubenswrapper[4783]: I1002 11:48:39.354857 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-h695m" Oct 02 11:48:39 crc kubenswrapper[4783]: I1002 11:48:39.885357 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-h695m"] Oct 02 11:48:39 crc kubenswrapper[4783]: I1002 11:48:39.996024 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-h695m" event={"ID":"2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc","Type":"ContainerStarted","Data":"3190233da6c64640c2f1bff211d372073713bb4810eb0033d657b97d5b6d94b0"} Oct 02 11:48:41 crc kubenswrapper[4783]: I1002 11:48:41.006264 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-h695m" event={"ID":"2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc","Type":"ContainerStarted","Data":"194140f5f8975d7134311ed546045382d5d5563ab216d83b39d9706d9568e4c4"} Oct 02 11:48:41 crc kubenswrapper[4783]: I1002 11:48:41.023735 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-h695m" podStartSLOduration=1.419506369 podStartE2EDuration="2.023714225s" podCreationTimestamp="2025-10-02 11:48:39 +0000 UTC" firstStartedPulling="2025-10-02 11:48:39.875147596 +0000 UTC m=+3353.191341857" lastFinishedPulling="2025-10-02 11:48:40.479355452 +0000 UTC m=+3353.795549713" observedRunningTime="2025-10-02 11:48:41.021793213 +0000 UTC m=+3354.337987514" watchObservedRunningTime="2025-10-02 11:48:41.023714225 +0000 UTC m=+3354.339908486" Oct 02 11:49:21 crc kubenswrapper[4783]: I1002 11:49:21.514097 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 02 11:49:21 crc kubenswrapper[4783]: I1002 11:49:21.514710 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 02 11:49:43 crc kubenswrapper[4783]: I1002 11:49:43.119288 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-wdgbn"] Oct 02 11:49:43 crc kubenswrapper[4783]: I1002 11:49:43.126465 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wdgbn" Oct 02 11:49:43 crc kubenswrapper[4783]: I1002 11:49:43.135594 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wdgbn"] Oct 02 11:49:43 crc kubenswrapper[4783]: I1002 11:49:43.300403 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/22fa4069-4270-4316-b768-71ea003f7e9f-catalog-content\") pod \"redhat-operators-wdgbn\" (UID: \"22fa4069-4270-4316-b768-71ea003f7e9f\") " pod="openshift-marketplace/redhat-operators-wdgbn" Oct 02 11:49:43 crc kubenswrapper[4783]: I1002 11:49:43.300491 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/22fa4069-4270-4316-b768-71ea003f7e9f-utilities\") pod \"redhat-operators-wdgbn\" (UID: \"22fa4069-4270-4316-b768-71ea003f7e9f\") " pod="openshift-marketplace/redhat-operators-wdgbn" Oct 02 11:49:43 crc kubenswrapper[4783]: I1002 11:49:43.300786 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mqhh2\" (UniqueName: \"kubernetes.io/projected/22fa4069-4270-4316-b768-71ea003f7e9f-kube-api-access-mqhh2\") pod \"redhat-operators-wdgbn\" (UID: \"22fa4069-4270-4316-b768-71ea003f7e9f\") " pod="openshift-marketplace/redhat-operators-wdgbn" Oct 02 11:49:43 crc kubenswrapper[4783]: I1002 11:49:43.402165 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/22fa4069-4270-4316-b768-71ea003f7e9f-catalog-content\") pod \"redhat-operators-wdgbn\" (UID: \"22fa4069-4270-4316-b768-71ea003f7e9f\") " pod="openshift-marketplace/redhat-operators-wdgbn" Oct 02 11:49:43 crc kubenswrapper[4783]: I1002 11:49:43.402503 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/22fa4069-4270-4316-b768-71ea003f7e9f-utilities\") pod \"redhat-operators-wdgbn\" (UID: \"22fa4069-4270-4316-b768-71ea003f7e9f\") " pod="openshift-marketplace/redhat-operators-wdgbn" Oct 02 11:49:43 crc kubenswrapper[4783]: I1002 11:49:43.402620 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mqhh2\" (UniqueName: \"kubernetes.io/projected/22fa4069-4270-4316-b768-71ea003f7e9f-kube-api-access-mqhh2\") pod \"redhat-operators-wdgbn\" (UID: \"22fa4069-4270-4316-b768-71ea003f7e9f\") " pod="openshift-marketplace/redhat-operators-wdgbn" Oct 02 11:49:43 crc kubenswrapper[4783]: I1002 11:49:43.402758 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/22fa4069-4270-4316-b768-71ea003f7e9f-catalog-content\") pod \"redhat-operators-wdgbn\" (UID: \"22fa4069-4270-4316-b768-71ea003f7e9f\") " pod="openshift-marketplace/redhat-operators-wdgbn" Oct 02 11:49:43 crc kubenswrapper[4783]: I1002 11:49:43.402925 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/22fa4069-4270-4316-b768-71ea003f7e9f-utilities\") pod \"redhat-operators-wdgbn\" (UID: \"22fa4069-4270-4316-b768-71ea003f7e9f\") " pod="openshift-marketplace/redhat-operators-wdgbn" Oct 02 11:49:43 crc kubenswrapper[4783]: I1002 11:49:43.428526 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-mqhh2\" (UniqueName: \"kubernetes.io/projected/22fa4069-4270-4316-b768-71ea003f7e9f-kube-api-access-mqhh2\") pod \"redhat-operators-wdgbn\" (UID: \"22fa4069-4270-4316-b768-71ea003f7e9f\") " pod="openshift-marketplace/redhat-operators-wdgbn" Oct 02 11:49:43 crc kubenswrapper[4783]: I1002 11:49:43.445063 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wdgbn" Oct 02 11:49:43 crc kubenswrapper[4783]: I1002 11:49:43.952007 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wdgbn"] Oct 02 11:49:44 crc kubenswrapper[4783]: I1002 11:49:44.537633 4783 generic.go:334] "Generic (PLEG): container finished" podID="22fa4069-4270-4316-b768-71ea003f7e9f" containerID="45476c7e6a34b42c528cafb37b95af4f58b25277809b72b6dcac9713365b3adb" exitCode=0 Oct 02 11:49:44 crc kubenswrapper[4783]: I1002 11:49:44.537784 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wdgbn" event={"ID":"22fa4069-4270-4316-b768-71ea003f7e9f","Type":"ContainerDied","Data":"45476c7e6a34b42c528cafb37b95af4f58b25277809b72b6dcac9713365b3adb"} Oct 02 11:49:44 crc kubenswrapper[4783]: I1002 11:49:44.537883 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wdgbn" event={"ID":"22fa4069-4270-4316-b768-71ea003f7e9f","Type":"ContainerStarted","Data":"2b9e1c03225c6437ed7d95a7fedeeadd5902d36f7f44c5a475da583128d2792b"} Oct 02 11:49:44 crc kubenswrapper[4783]: I1002 11:49:44.540347 4783 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 02 11:49:46 crc kubenswrapper[4783]: I1002 11:49:46.555558 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wdgbn" event={"ID":"22fa4069-4270-4316-b768-71ea003f7e9f","Type":"ContainerStarted","Data":"a6cbad794b51d4a8f93326b621a0ad7410d4c2bb1e3f873f88fb1092e0fd9ebe"} Oct 02 11:49:50 crc kubenswrapper[4783]: I1002 11:49:50.605087 4783 generic.go:334] "Generic (PLEG): container finished" podID="22fa4069-4270-4316-b768-71ea003f7e9f" containerID="a6cbad794b51d4a8f93326b621a0ad7410d4c2bb1e3f873f88fb1092e0fd9ebe" exitCode=0 Oct 02 11:49:50 crc kubenswrapper[4783]: I1002 11:49:50.605133 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wdgbn" event={"ID":"22fa4069-4270-4316-b768-71ea003f7e9f","Type":"ContainerDied","Data":"a6cbad794b51d4a8f93326b621a0ad7410d4c2bb1e3f873f88fb1092e0fd9ebe"} Oct 02 11:49:51 crc kubenswrapper[4783]: I1002 11:49:51.513749 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 02 11:49:51 crc kubenswrapper[4783]: I1002 11:49:51.514458 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 02 11:49:51 crc kubenswrapper[4783]: I1002 11:49:51.615527 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wdgbn" 
event={"ID":"22fa4069-4270-4316-b768-71ea003f7e9f","Type":"ContainerStarted","Data":"cbbfb9d5e2a03420d7afbd9e1ad58ec2214ddffa3d40cb8f9c30870625700fb6"} Oct 02 11:49:51 crc kubenswrapper[4783]: I1002 11:49:51.645572 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-wdgbn" podStartSLOduration=2.131926736 podStartE2EDuration="8.645552785s" podCreationTimestamp="2025-10-02 11:49:43 +0000 UTC" firstStartedPulling="2025-10-02 11:49:44.539956192 +0000 UTC m=+3417.856150473" lastFinishedPulling="2025-10-02 11:49:51.053582221 +0000 UTC m=+3424.369776522" observedRunningTime="2025-10-02 11:49:51.636763947 +0000 UTC m=+3424.952958208" watchObservedRunningTime="2025-10-02 11:49:51.645552785 +0000 UTC m=+3424.961747046" Oct 02 11:49:53 crc kubenswrapper[4783]: I1002 11:49:53.445740 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-wdgbn" Oct 02 11:49:53 crc kubenswrapper[4783]: I1002 11:49:53.446327 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-wdgbn" Oct 02 11:49:53 crc kubenswrapper[4783]: I1002 11:49:53.633433 4783 generic.go:334] "Generic (PLEG): container finished" podID="2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc" containerID="194140f5f8975d7134311ed546045382d5d5563ab216d83b39d9706d9568e4c4" exitCode=2 Oct 02 11:49:53 crc kubenswrapper[4783]: I1002 11:49:53.633479 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-h695m" event={"ID":"2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc","Type":"ContainerDied","Data":"194140f5f8975d7134311ed546045382d5d5563ab216d83b39d9706d9568e4c4"} Oct 02 11:49:54 crc kubenswrapper[4783]: I1002 11:49:54.488830 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-wdgbn" podUID="22fa4069-4270-4316-b768-71ea003f7e9f" containerName="registry-server" probeResult="failure" output=< Oct 02 11:49:54 crc kubenswrapper[4783]: timeout: failed to connect service ":50051" within 1s Oct 02 11:49:54 crc kubenswrapper[4783]: > Oct 02 11:49:55 crc kubenswrapper[4783]: I1002 11:49:55.064364 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-h695m" Oct 02 11:49:55 crc kubenswrapper[4783]: I1002 11:49:55.229119 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-nova-cell1-compute-config-0\") pod \"2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc\" (UID: \"2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc\") " Oct 02 11:49:55 crc kubenswrapper[4783]: I1002 11:49:55.229197 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-nova-cell1-compute-config-1\") pod \"2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc\" (UID: \"2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc\") " Oct 02 11:49:55 crc kubenswrapper[4783]: I1002 11:49:55.229250 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-nova-extra-config-0\") pod \"2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc\" (UID: \"2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc\") " Oct 02 11:49:55 crc kubenswrapper[4783]: I1002 11:49:55.229304 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-nova-migration-ssh-key-1\") pod \"2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc\" (UID: \"2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc\") " Oct 02 11:49:55 crc kubenswrapper[4783]: I1002 11:49:55.229323 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-ssh-key\") pod \"2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc\" (UID: \"2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc\") " Oct 02 11:49:55 crc kubenswrapper[4783]: I1002 11:49:55.229403 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-nova-migration-ssh-key-0\") pod \"2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc\" (UID: \"2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc\") " Oct 02 11:49:55 crc kubenswrapper[4783]: I1002 11:49:55.229451 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-nova-combined-ca-bundle\") pod \"2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc\" (UID: \"2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc\") " Oct 02 11:49:55 crc kubenswrapper[4783]: I1002 11:49:55.229494 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gcwz8\" (UniqueName: \"kubernetes.io/projected/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-kube-api-access-gcwz8\") pod \"2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc\" (UID: \"2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc\") " Oct 02 11:49:55 crc kubenswrapper[4783]: I1002 11:49:55.229521 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-inventory\") pod \"2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc\" (UID: \"2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc\") " Oct 02 11:49:55 crc kubenswrapper[4783]: I1002 11:49:55.234913 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc" (UID: "2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:49:55 crc kubenswrapper[4783]: I1002 11:49:55.235747 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-kube-api-access-gcwz8" (OuterVolumeSpecName: "kube-api-access-gcwz8") pod "2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc" (UID: "2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc"). InnerVolumeSpecName "kube-api-access-gcwz8". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:49:55 crc kubenswrapper[4783]: I1002 11:49:55.259225 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-nova-extra-config-0" (OuterVolumeSpecName: "nova-extra-config-0") pod "2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc" (UID: "2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc"). InnerVolumeSpecName "nova-extra-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:49:55 crc kubenswrapper[4783]: I1002 11:49:55.261990 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc" (UID: "2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:49:55 crc kubenswrapper[4783]: I1002 11:49:55.264392 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc" (UID: "2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:49:55 crc kubenswrapper[4783]: I1002 11:49:55.270776 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc" (UID: "2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:49:55 crc kubenswrapper[4783]: I1002 11:49:55.271703 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc" (UID: "2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc"). InnerVolumeSpecName "nova-migration-ssh-key-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:49:55 crc kubenswrapper[4783]: I1002 11:49:55.276135 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-inventory" (OuterVolumeSpecName: "inventory") pod "2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc" (UID: "2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc"). InnerVolumeSpecName "inventory". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:49:55 crc kubenswrapper[4783]: I1002 11:49:55.286564 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc" (UID: "2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:49:55 crc kubenswrapper[4783]: I1002 11:49:55.331782 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gcwz8\" (UniqueName: \"kubernetes.io/projected/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-kube-api-access-gcwz8\") on node \"crc\" DevicePath \"\"" Oct 02 11:49:55 crc kubenswrapper[4783]: I1002 11:49:55.331819 4783 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-inventory\") on node \"crc\" DevicePath \"\"" Oct 02 11:49:55 crc kubenswrapper[4783]: I1002 11:49:55.331830 4783 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Oct 02 11:49:55 crc kubenswrapper[4783]: I1002 11:49:55.331840 4783 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Oct 02 11:49:55 crc kubenswrapper[4783]: I1002 11:49:55.331848 4783 reconciler_common.go:293] "Volume detached for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-nova-extra-config-0\") on node \"crc\" DevicePath \"\"" Oct 02 11:49:55 crc kubenswrapper[4783]: I1002 11:49:55.331857 4783 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Oct 02 11:49:55 crc kubenswrapper[4783]: I1002 11:49:55.331865 4783 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 02 11:49:55 crc kubenswrapper[4783]: I1002 11:49:55.331874 4783 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Oct 02 11:49:55 crc kubenswrapper[4783]: I1002 11:49:55.331882 4783 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 11:49:55 crc kubenswrapper[4783]: I1002 11:49:55.653291 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-h695m" event={"ID":"2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc","Type":"ContainerDied","Data":"3190233da6c64640c2f1bff211d372073713bb4810eb0033d657b97d5b6d94b0"} Oct 02 11:49:55 crc kubenswrapper[4783]: I1002 11:49:55.654361 4783 pod_container_deletor.go:80] "Container not found in pod's containers" 
containerID="3190233da6c64640c2f1bff211d372073713bb4810eb0033d657b97d5b6d94b0" Oct 02 11:49:55 crc kubenswrapper[4783]: I1002 11:49:55.653391 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-h695m" Oct 02 11:50:04 crc kubenswrapper[4783]: I1002 11:50:04.494721 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-wdgbn" podUID="22fa4069-4270-4316-b768-71ea003f7e9f" containerName="registry-server" probeResult="failure" output=< Oct 02 11:50:04 crc kubenswrapper[4783]: timeout: failed to connect service ":50051" within 1s Oct 02 11:50:04 crc kubenswrapper[4783]: > Oct 02 11:50:06 crc kubenswrapper[4783]: I1002 11:50:06.909017 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-xnltc"] Oct 02 11:50:06 crc kubenswrapper[4783]: E1002 11:50:06.909769 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc" containerName="nova-edpm-deployment-openstack-edpm-ipam" Oct 02 11:50:06 crc kubenswrapper[4783]: I1002 11:50:06.909782 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc" containerName="nova-edpm-deployment-openstack-edpm-ipam" Oct 02 11:50:06 crc kubenswrapper[4783]: I1002 11:50:06.909995 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc" containerName="nova-edpm-deployment-openstack-edpm-ipam" Oct 02 11:50:06 crc kubenswrapper[4783]: I1002 11:50:06.911389 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xnltc" Oct 02 11:50:06 crc kubenswrapper[4783]: I1002 11:50:06.921989 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xnltc"] Oct 02 11:50:06 crc kubenswrapper[4783]: I1002 11:50:06.943346 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a0bdb52b-31fd-4e16-98bc-3c9e65846bda-utilities\") pod \"redhat-marketplace-xnltc\" (UID: \"a0bdb52b-31fd-4e16-98bc-3c9e65846bda\") " pod="openshift-marketplace/redhat-marketplace-xnltc" Oct 02 11:50:06 crc kubenswrapper[4783]: I1002 11:50:06.943630 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a0bdb52b-31fd-4e16-98bc-3c9e65846bda-catalog-content\") pod \"redhat-marketplace-xnltc\" (UID: \"a0bdb52b-31fd-4e16-98bc-3c9e65846bda\") " pod="openshift-marketplace/redhat-marketplace-xnltc" Oct 02 11:50:06 crc kubenswrapper[4783]: I1002 11:50:06.943660 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wtjxq\" (UniqueName: \"kubernetes.io/projected/a0bdb52b-31fd-4e16-98bc-3c9e65846bda-kube-api-access-wtjxq\") pod \"redhat-marketplace-xnltc\" (UID: \"a0bdb52b-31fd-4e16-98bc-3c9e65846bda\") " pod="openshift-marketplace/redhat-marketplace-xnltc" Oct 02 11:50:07 crc kubenswrapper[4783]: I1002 11:50:07.045863 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a0bdb52b-31fd-4e16-98bc-3c9e65846bda-catalog-content\") pod \"redhat-marketplace-xnltc\" (UID: \"a0bdb52b-31fd-4e16-98bc-3c9e65846bda\") " pod="openshift-marketplace/redhat-marketplace-xnltc" Oct 02 
11:50:07 crc kubenswrapper[4783]: I1002 11:50:07.045922 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wtjxq\" (UniqueName: \"kubernetes.io/projected/a0bdb52b-31fd-4e16-98bc-3c9e65846bda-kube-api-access-wtjxq\") pod \"redhat-marketplace-xnltc\" (UID: \"a0bdb52b-31fd-4e16-98bc-3c9e65846bda\") " pod="openshift-marketplace/redhat-marketplace-xnltc" Oct 02 11:50:07 crc kubenswrapper[4783]: I1002 11:50:07.046022 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a0bdb52b-31fd-4e16-98bc-3c9e65846bda-utilities\") pod \"redhat-marketplace-xnltc\" (UID: \"a0bdb52b-31fd-4e16-98bc-3c9e65846bda\") " pod="openshift-marketplace/redhat-marketplace-xnltc" Oct 02 11:50:07 crc kubenswrapper[4783]: I1002 11:50:07.046426 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a0bdb52b-31fd-4e16-98bc-3c9e65846bda-catalog-content\") pod \"redhat-marketplace-xnltc\" (UID: \"a0bdb52b-31fd-4e16-98bc-3c9e65846bda\") " pod="openshift-marketplace/redhat-marketplace-xnltc" Oct 02 11:50:07 crc kubenswrapper[4783]: I1002 11:50:07.046453 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a0bdb52b-31fd-4e16-98bc-3c9e65846bda-utilities\") pod \"redhat-marketplace-xnltc\" (UID: \"a0bdb52b-31fd-4e16-98bc-3c9e65846bda\") " pod="openshift-marketplace/redhat-marketplace-xnltc" Oct 02 11:50:07 crc kubenswrapper[4783]: I1002 11:50:07.073858 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wtjxq\" (UniqueName: \"kubernetes.io/projected/a0bdb52b-31fd-4e16-98bc-3c9e65846bda-kube-api-access-wtjxq\") pod \"redhat-marketplace-xnltc\" (UID: \"a0bdb52b-31fd-4e16-98bc-3c9e65846bda\") " pod="openshift-marketplace/redhat-marketplace-xnltc" Oct 02 11:50:07 crc kubenswrapper[4783]: I1002 11:50:07.240535 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xnltc" Oct 02 11:50:07 crc kubenswrapper[4783]: I1002 11:50:07.679887 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-xnltc"] Oct 02 11:50:07 crc kubenswrapper[4783]: W1002 11:50:07.685272 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda0bdb52b_31fd_4e16_98bc_3c9e65846bda.slice/crio-87af2572812c1b90da14150279602171ed07b0504e4fb08483ba2e1e845e3918 WatchSource:0}: Error finding container 87af2572812c1b90da14150279602171ed07b0504e4fb08483ba2e1e845e3918: Status 404 returned error can't find the container with id 87af2572812c1b90da14150279602171ed07b0504e4fb08483ba2e1e845e3918 Oct 02 11:50:07 crc kubenswrapper[4783]: I1002 11:50:07.784317 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xnltc" event={"ID":"a0bdb52b-31fd-4e16-98bc-3c9e65846bda","Type":"ContainerStarted","Data":"87af2572812c1b90da14150279602171ed07b0504e4fb08483ba2e1e845e3918"} Oct 02 11:50:08 crc kubenswrapper[4783]: I1002 11:50:08.793498 4783 generic.go:334] "Generic (PLEG): container finished" podID="a0bdb52b-31fd-4e16-98bc-3c9e65846bda" containerID="5280524ebb8caae657f62c0a7ea18a6d746be3717b7034b047709fec94eda6df" exitCode=0 Oct 02 11:50:08 crc kubenswrapper[4783]: I1002 11:50:08.793720 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xnltc" event={"ID":"a0bdb52b-31fd-4e16-98bc-3c9e65846bda","Type":"ContainerDied","Data":"5280524ebb8caae657f62c0a7ea18a6d746be3717b7034b047709fec94eda6df"} Oct 02 11:50:10 crc kubenswrapper[4783]: I1002 11:50:10.813618 4783 generic.go:334] "Generic (PLEG): container finished" podID="a0bdb52b-31fd-4e16-98bc-3c9e65846bda" containerID="a9817e06114a2187ee8e2782019a2f2f08616a84727b8fe2043e5a2f1b1a83aa" exitCode=0 Oct 02 11:50:10 crc kubenswrapper[4783]: I1002 11:50:10.813754 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xnltc" event={"ID":"a0bdb52b-31fd-4e16-98bc-3c9e65846bda","Type":"ContainerDied","Data":"a9817e06114a2187ee8e2782019a2f2f08616a84727b8fe2043e5a2f1b1a83aa"} Oct 02 11:50:11 crc kubenswrapper[4783]: I1002 11:50:11.825000 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xnltc" event={"ID":"a0bdb52b-31fd-4e16-98bc-3c9e65846bda","Type":"ContainerStarted","Data":"f0ceb882971dd94acdf13b7967fbb1608c46645c213dac16b62b4be2321f0f28"} Oct 02 11:50:13 crc kubenswrapper[4783]: I1002 11:50:13.493442 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-wdgbn" Oct 02 11:50:13 crc kubenswrapper[4783]: I1002 11:50:13.514921 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-xnltc" podStartSLOduration=5.07477123 podStartE2EDuration="7.51490208s" podCreationTimestamp="2025-10-02 11:50:06 +0000 UTC" firstStartedPulling="2025-10-02 11:50:08.79682803 +0000 UTC m=+3442.113022291" lastFinishedPulling="2025-10-02 11:50:11.23695888 +0000 UTC m=+3444.553153141" observedRunningTime="2025-10-02 11:50:11.850857885 +0000 UTC m=+3445.167052166" watchObservedRunningTime="2025-10-02 11:50:13.51490208 +0000 UTC m=+3446.831096341" Oct 02 11:50:13 crc kubenswrapper[4783]: I1002 11:50:13.538620 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-marketplace/redhat-operators-wdgbn" Oct 02 11:50:14 crc kubenswrapper[4783]: I1002 11:50:14.277787 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wdgbn"] Oct 02 11:50:14 crc kubenswrapper[4783]: I1002 11:50:14.853852 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-wdgbn" podUID="22fa4069-4270-4316-b768-71ea003f7e9f" containerName="registry-server" containerID="cri-o://cbbfb9d5e2a03420d7afbd9e1ad58ec2214ddffa3d40cb8f9c30870625700fb6" gracePeriod=2 Oct 02 11:50:15 crc kubenswrapper[4783]: I1002 11:50:15.271352 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wdgbn" Oct 02 11:50:15 crc kubenswrapper[4783]: I1002 11:50:15.396262 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/22fa4069-4270-4316-b768-71ea003f7e9f-catalog-content\") pod \"22fa4069-4270-4316-b768-71ea003f7e9f\" (UID: \"22fa4069-4270-4316-b768-71ea003f7e9f\") " Oct 02 11:50:15 crc kubenswrapper[4783]: I1002 11:50:15.396334 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/22fa4069-4270-4316-b768-71ea003f7e9f-utilities\") pod \"22fa4069-4270-4316-b768-71ea003f7e9f\" (UID: \"22fa4069-4270-4316-b768-71ea003f7e9f\") " Oct 02 11:50:15 crc kubenswrapper[4783]: I1002 11:50:15.396520 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mqhh2\" (UniqueName: \"kubernetes.io/projected/22fa4069-4270-4316-b768-71ea003f7e9f-kube-api-access-mqhh2\") pod \"22fa4069-4270-4316-b768-71ea003f7e9f\" (UID: \"22fa4069-4270-4316-b768-71ea003f7e9f\") " Oct 02 11:50:15 crc kubenswrapper[4783]: I1002 11:50:15.397368 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/22fa4069-4270-4316-b768-71ea003f7e9f-utilities" (OuterVolumeSpecName: "utilities") pod "22fa4069-4270-4316-b768-71ea003f7e9f" (UID: "22fa4069-4270-4316-b768-71ea003f7e9f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:50:15 crc kubenswrapper[4783]: I1002 11:50:15.402034 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22fa4069-4270-4316-b768-71ea003f7e9f-kube-api-access-mqhh2" (OuterVolumeSpecName: "kube-api-access-mqhh2") pod "22fa4069-4270-4316-b768-71ea003f7e9f" (UID: "22fa4069-4270-4316-b768-71ea003f7e9f"). InnerVolumeSpecName "kube-api-access-mqhh2". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:50:15 crc kubenswrapper[4783]: I1002 11:50:15.470853 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/22fa4069-4270-4316-b768-71ea003f7e9f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "22fa4069-4270-4316-b768-71ea003f7e9f" (UID: "22fa4069-4270-4316-b768-71ea003f7e9f"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:50:15 crc kubenswrapper[4783]: I1002 11:50:15.499241 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mqhh2\" (UniqueName: \"kubernetes.io/projected/22fa4069-4270-4316-b768-71ea003f7e9f-kube-api-access-mqhh2\") on node \"crc\" DevicePath \"\"" Oct 02 11:50:15 crc kubenswrapper[4783]: I1002 11:50:15.499519 4783 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/22fa4069-4270-4316-b768-71ea003f7e9f-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 02 11:50:15 crc kubenswrapper[4783]: I1002 11:50:15.499613 4783 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/22fa4069-4270-4316-b768-71ea003f7e9f-utilities\") on node \"crc\" DevicePath \"\"" Oct 02 11:50:15 crc kubenswrapper[4783]: I1002 11:50:15.866060 4783 generic.go:334] "Generic (PLEG): container finished" podID="22fa4069-4270-4316-b768-71ea003f7e9f" containerID="cbbfb9d5e2a03420d7afbd9e1ad58ec2214ddffa3d40cb8f9c30870625700fb6" exitCode=0 Oct 02 11:50:15 crc kubenswrapper[4783]: I1002 11:50:15.866443 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wdgbn" event={"ID":"22fa4069-4270-4316-b768-71ea003f7e9f","Type":"ContainerDied","Data":"cbbfb9d5e2a03420d7afbd9e1ad58ec2214ddffa3d40cb8f9c30870625700fb6"} Oct 02 11:50:15 crc kubenswrapper[4783]: I1002 11:50:15.868222 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wdgbn" event={"ID":"22fa4069-4270-4316-b768-71ea003f7e9f","Type":"ContainerDied","Data":"2b9e1c03225c6437ed7d95a7fedeeadd5902d36f7f44c5a475da583128d2792b"} Oct 02 11:50:15 crc kubenswrapper[4783]: I1002 11:50:15.867360 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wdgbn" Oct 02 11:50:15 crc kubenswrapper[4783]: I1002 11:50:15.868274 4783 scope.go:117] "RemoveContainer" containerID="cbbfb9d5e2a03420d7afbd9e1ad58ec2214ddffa3d40cb8f9c30870625700fb6" Oct 02 11:50:15 crc kubenswrapper[4783]: I1002 11:50:15.895428 4783 scope.go:117] "RemoveContainer" containerID="a6cbad794b51d4a8f93326b621a0ad7410d4c2bb1e3f873f88fb1092e0fd9ebe" Oct 02 11:50:15 crc kubenswrapper[4783]: I1002 11:50:15.900734 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wdgbn"] Oct 02 11:50:15 crc kubenswrapper[4783]: I1002 11:50:15.908865 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-wdgbn"] Oct 02 11:50:15 crc kubenswrapper[4783]: I1002 11:50:15.920684 4783 scope.go:117] "RemoveContainer" containerID="45476c7e6a34b42c528cafb37b95af4f58b25277809b72b6dcac9713365b3adb" Oct 02 11:50:15 crc kubenswrapper[4783]: I1002 11:50:15.968155 4783 scope.go:117] "RemoveContainer" containerID="cbbfb9d5e2a03420d7afbd9e1ad58ec2214ddffa3d40cb8f9c30870625700fb6" Oct 02 11:50:15 crc kubenswrapper[4783]: E1002 11:50:15.968660 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cbbfb9d5e2a03420d7afbd9e1ad58ec2214ddffa3d40cb8f9c30870625700fb6\": container with ID starting with cbbfb9d5e2a03420d7afbd9e1ad58ec2214ddffa3d40cb8f9c30870625700fb6 not found: ID does not exist" containerID="cbbfb9d5e2a03420d7afbd9e1ad58ec2214ddffa3d40cb8f9c30870625700fb6" Oct 02 11:50:15 crc kubenswrapper[4783]: I1002 11:50:15.968701 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cbbfb9d5e2a03420d7afbd9e1ad58ec2214ddffa3d40cb8f9c30870625700fb6"} err="failed to get container status \"cbbfb9d5e2a03420d7afbd9e1ad58ec2214ddffa3d40cb8f9c30870625700fb6\": rpc error: code = NotFound desc = could not find container \"cbbfb9d5e2a03420d7afbd9e1ad58ec2214ddffa3d40cb8f9c30870625700fb6\": container with ID starting with cbbfb9d5e2a03420d7afbd9e1ad58ec2214ddffa3d40cb8f9c30870625700fb6 not found: ID does not exist" Oct 02 11:50:15 crc kubenswrapper[4783]: I1002 11:50:15.968723 4783 scope.go:117] "RemoveContainer" containerID="a6cbad794b51d4a8f93326b621a0ad7410d4c2bb1e3f873f88fb1092e0fd9ebe" Oct 02 11:50:15 crc kubenswrapper[4783]: E1002 11:50:15.969197 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a6cbad794b51d4a8f93326b621a0ad7410d4c2bb1e3f873f88fb1092e0fd9ebe\": container with ID starting with a6cbad794b51d4a8f93326b621a0ad7410d4c2bb1e3f873f88fb1092e0fd9ebe not found: ID does not exist" containerID="a6cbad794b51d4a8f93326b621a0ad7410d4c2bb1e3f873f88fb1092e0fd9ebe" Oct 02 11:50:15 crc kubenswrapper[4783]: I1002 11:50:15.969227 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a6cbad794b51d4a8f93326b621a0ad7410d4c2bb1e3f873f88fb1092e0fd9ebe"} err="failed to get container status \"a6cbad794b51d4a8f93326b621a0ad7410d4c2bb1e3f873f88fb1092e0fd9ebe\": rpc error: code = NotFound desc = could not find container \"a6cbad794b51d4a8f93326b621a0ad7410d4c2bb1e3f873f88fb1092e0fd9ebe\": container with ID starting with a6cbad794b51d4a8f93326b621a0ad7410d4c2bb1e3f873f88fb1092e0fd9ebe not found: ID does not exist" Oct 02 11:50:15 crc kubenswrapper[4783]: I1002 11:50:15.969241 4783 scope.go:117] "RemoveContainer" 
containerID="45476c7e6a34b42c528cafb37b95af4f58b25277809b72b6dcac9713365b3adb" Oct 02 11:50:15 crc kubenswrapper[4783]: E1002 11:50:15.969604 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"45476c7e6a34b42c528cafb37b95af4f58b25277809b72b6dcac9713365b3adb\": container with ID starting with 45476c7e6a34b42c528cafb37b95af4f58b25277809b72b6dcac9713365b3adb not found: ID does not exist" containerID="45476c7e6a34b42c528cafb37b95af4f58b25277809b72b6dcac9713365b3adb" Oct 02 11:50:15 crc kubenswrapper[4783]: I1002 11:50:15.969630 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"45476c7e6a34b42c528cafb37b95af4f58b25277809b72b6dcac9713365b3adb"} err="failed to get container status \"45476c7e6a34b42c528cafb37b95af4f58b25277809b72b6dcac9713365b3adb\": rpc error: code = NotFound desc = could not find container \"45476c7e6a34b42c528cafb37b95af4f58b25277809b72b6dcac9713365b3adb\": container with ID starting with 45476c7e6a34b42c528cafb37b95af4f58b25277809b72b6dcac9713365b3adb not found: ID does not exist" Oct 02 11:50:17 crc kubenswrapper[4783]: I1002 11:50:17.241107 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-xnltc" Oct 02 11:50:17 crc kubenswrapper[4783]: I1002 11:50:17.241685 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-xnltc" Oct 02 11:50:17 crc kubenswrapper[4783]: I1002 11:50:17.300476 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-xnltc" Oct 02 11:50:17 crc kubenswrapper[4783]: I1002 11:50:17.559342 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22fa4069-4270-4316-b768-71ea003f7e9f" path="/var/lib/kubelet/pods/22fa4069-4270-4316-b768-71ea003f7e9f/volumes" Oct 02 11:50:17 crc kubenswrapper[4783]: I1002 11:50:17.929934 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-xnltc" Oct 02 11:50:18 crc kubenswrapper[4783]: I1002 11:50:18.677988 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xnltc"] Oct 02 11:50:19 crc kubenswrapper[4783]: I1002 11:50:19.899726 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-xnltc" podUID="a0bdb52b-31fd-4e16-98bc-3c9e65846bda" containerName="registry-server" containerID="cri-o://f0ceb882971dd94acdf13b7967fbb1608c46645c213dac16b62b4be2321f0f28" gracePeriod=2 Oct 02 11:50:20 crc kubenswrapper[4783]: I1002 11:50:20.337161 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xnltc" Oct 02 11:50:20 crc kubenswrapper[4783]: I1002 11:50:20.493308 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wtjxq\" (UniqueName: \"kubernetes.io/projected/a0bdb52b-31fd-4e16-98bc-3c9e65846bda-kube-api-access-wtjxq\") pod \"a0bdb52b-31fd-4e16-98bc-3c9e65846bda\" (UID: \"a0bdb52b-31fd-4e16-98bc-3c9e65846bda\") " Oct 02 11:50:20 crc kubenswrapper[4783]: I1002 11:50:20.493502 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a0bdb52b-31fd-4e16-98bc-3c9e65846bda-utilities\") pod \"a0bdb52b-31fd-4e16-98bc-3c9e65846bda\" (UID: \"a0bdb52b-31fd-4e16-98bc-3c9e65846bda\") " Oct 02 11:50:20 crc kubenswrapper[4783]: I1002 11:50:20.493666 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a0bdb52b-31fd-4e16-98bc-3c9e65846bda-catalog-content\") pod \"a0bdb52b-31fd-4e16-98bc-3c9e65846bda\" (UID: \"a0bdb52b-31fd-4e16-98bc-3c9e65846bda\") " Oct 02 11:50:20 crc kubenswrapper[4783]: I1002 11:50:20.494462 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a0bdb52b-31fd-4e16-98bc-3c9e65846bda-utilities" (OuterVolumeSpecName: "utilities") pod "a0bdb52b-31fd-4e16-98bc-3c9e65846bda" (UID: "a0bdb52b-31fd-4e16-98bc-3c9e65846bda"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:50:20 crc kubenswrapper[4783]: I1002 11:50:20.499276 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0bdb52b-31fd-4e16-98bc-3c9e65846bda-kube-api-access-wtjxq" (OuterVolumeSpecName: "kube-api-access-wtjxq") pod "a0bdb52b-31fd-4e16-98bc-3c9e65846bda" (UID: "a0bdb52b-31fd-4e16-98bc-3c9e65846bda"). InnerVolumeSpecName "kube-api-access-wtjxq". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:50:20 crc kubenswrapper[4783]: I1002 11:50:20.506992 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a0bdb52b-31fd-4e16-98bc-3c9e65846bda-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a0bdb52b-31fd-4e16-98bc-3c9e65846bda" (UID: "a0bdb52b-31fd-4e16-98bc-3c9e65846bda"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:50:20 crc kubenswrapper[4783]: I1002 11:50:20.595591 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wtjxq\" (UniqueName: \"kubernetes.io/projected/a0bdb52b-31fd-4e16-98bc-3c9e65846bda-kube-api-access-wtjxq\") on node \"crc\" DevicePath \"\"" Oct 02 11:50:20 crc kubenswrapper[4783]: I1002 11:50:20.595639 4783 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a0bdb52b-31fd-4e16-98bc-3c9e65846bda-utilities\") on node \"crc\" DevicePath \"\"" Oct 02 11:50:20 crc kubenswrapper[4783]: I1002 11:50:20.595649 4783 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a0bdb52b-31fd-4e16-98bc-3c9e65846bda-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 02 11:50:20 crc kubenswrapper[4783]: I1002 11:50:20.910590 4783 generic.go:334] "Generic (PLEG): container finished" podID="a0bdb52b-31fd-4e16-98bc-3c9e65846bda" containerID="f0ceb882971dd94acdf13b7967fbb1608c46645c213dac16b62b4be2321f0f28" exitCode=0 Oct 02 11:50:20 crc kubenswrapper[4783]: I1002 11:50:20.910640 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xnltc" event={"ID":"a0bdb52b-31fd-4e16-98bc-3c9e65846bda","Type":"ContainerDied","Data":"f0ceb882971dd94acdf13b7967fbb1608c46645c213dac16b62b4be2321f0f28"} Oct 02 11:50:20 crc kubenswrapper[4783]: I1002 11:50:20.910676 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-xnltc" event={"ID":"a0bdb52b-31fd-4e16-98bc-3c9e65846bda","Type":"ContainerDied","Data":"87af2572812c1b90da14150279602171ed07b0504e4fb08483ba2e1e845e3918"} Oct 02 11:50:20 crc kubenswrapper[4783]: I1002 11:50:20.910682 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-xnltc" Oct 02 11:50:20 crc kubenswrapper[4783]: I1002 11:50:20.910696 4783 scope.go:117] "RemoveContainer" containerID="f0ceb882971dd94acdf13b7967fbb1608c46645c213dac16b62b4be2321f0f28" Oct 02 11:50:20 crc kubenswrapper[4783]: I1002 11:50:20.938666 4783 scope.go:117] "RemoveContainer" containerID="a9817e06114a2187ee8e2782019a2f2f08616a84727b8fe2043e5a2f1b1a83aa" Oct 02 11:50:20 crc kubenswrapper[4783]: I1002 11:50:20.950047 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-xnltc"] Oct 02 11:50:20 crc kubenswrapper[4783]: I1002 11:50:20.958802 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-xnltc"] Oct 02 11:50:20 crc kubenswrapper[4783]: I1002 11:50:20.970703 4783 scope.go:117] "RemoveContainer" containerID="5280524ebb8caae657f62c0a7ea18a6d746be3717b7034b047709fec94eda6df" Oct 02 11:50:21 crc kubenswrapper[4783]: I1002 11:50:21.018283 4783 scope.go:117] "RemoveContainer" containerID="f0ceb882971dd94acdf13b7967fbb1608c46645c213dac16b62b4be2321f0f28" Oct 02 11:50:21 crc kubenswrapper[4783]: E1002 11:50:21.018892 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f0ceb882971dd94acdf13b7967fbb1608c46645c213dac16b62b4be2321f0f28\": container with ID starting with f0ceb882971dd94acdf13b7967fbb1608c46645c213dac16b62b4be2321f0f28 not found: ID does not exist" containerID="f0ceb882971dd94acdf13b7967fbb1608c46645c213dac16b62b4be2321f0f28" Oct 02 11:50:21 crc kubenswrapper[4783]: I1002 11:50:21.018952 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f0ceb882971dd94acdf13b7967fbb1608c46645c213dac16b62b4be2321f0f28"} err="failed to get container status \"f0ceb882971dd94acdf13b7967fbb1608c46645c213dac16b62b4be2321f0f28\": rpc error: code = NotFound desc = could not find container \"f0ceb882971dd94acdf13b7967fbb1608c46645c213dac16b62b4be2321f0f28\": container with ID starting with f0ceb882971dd94acdf13b7967fbb1608c46645c213dac16b62b4be2321f0f28 not found: ID does not exist" Oct 02 11:50:21 crc kubenswrapper[4783]: I1002 11:50:21.018987 4783 scope.go:117] "RemoveContainer" containerID="a9817e06114a2187ee8e2782019a2f2f08616a84727b8fe2043e5a2f1b1a83aa" Oct 02 11:50:21 crc kubenswrapper[4783]: E1002 11:50:21.019444 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a9817e06114a2187ee8e2782019a2f2f08616a84727b8fe2043e5a2f1b1a83aa\": container with ID starting with a9817e06114a2187ee8e2782019a2f2f08616a84727b8fe2043e5a2f1b1a83aa not found: ID does not exist" containerID="a9817e06114a2187ee8e2782019a2f2f08616a84727b8fe2043e5a2f1b1a83aa" Oct 02 11:50:21 crc kubenswrapper[4783]: I1002 11:50:21.019477 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a9817e06114a2187ee8e2782019a2f2f08616a84727b8fe2043e5a2f1b1a83aa"} err="failed to get container status \"a9817e06114a2187ee8e2782019a2f2f08616a84727b8fe2043e5a2f1b1a83aa\": rpc error: code = NotFound desc = could not find container \"a9817e06114a2187ee8e2782019a2f2f08616a84727b8fe2043e5a2f1b1a83aa\": container with ID starting with a9817e06114a2187ee8e2782019a2f2f08616a84727b8fe2043e5a2f1b1a83aa not found: ID does not exist" Oct 02 11:50:21 crc kubenswrapper[4783]: I1002 11:50:21.019497 4783 scope.go:117] "RemoveContainer" 
containerID="5280524ebb8caae657f62c0a7ea18a6d746be3717b7034b047709fec94eda6df" Oct 02 11:50:21 crc kubenswrapper[4783]: E1002 11:50:21.020076 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5280524ebb8caae657f62c0a7ea18a6d746be3717b7034b047709fec94eda6df\": container with ID starting with 5280524ebb8caae657f62c0a7ea18a6d746be3717b7034b047709fec94eda6df not found: ID does not exist" containerID="5280524ebb8caae657f62c0a7ea18a6d746be3717b7034b047709fec94eda6df" Oct 02 11:50:21 crc kubenswrapper[4783]: I1002 11:50:21.020103 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5280524ebb8caae657f62c0a7ea18a6d746be3717b7034b047709fec94eda6df"} err="failed to get container status \"5280524ebb8caae657f62c0a7ea18a6d746be3717b7034b047709fec94eda6df\": rpc error: code = NotFound desc = could not find container \"5280524ebb8caae657f62c0a7ea18a6d746be3717b7034b047709fec94eda6df\": container with ID starting with 5280524ebb8caae657f62c0a7ea18a6d746be3717b7034b047709fec94eda6df not found: ID does not exist" Oct 02 11:50:21 crc kubenswrapper[4783]: I1002 11:50:21.513969 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 02 11:50:21 crc kubenswrapper[4783]: I1002 11:50:21.514053 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 02 11:50:21 crc kubenswrapper[4783]: I1002 11:50:21.514110 4783 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" Oct 02 11:50:21 crc kubenswrapper[4783]: I1002 11:50:21.514818 4783 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d3853f8a29225709d658ac8d5245ea2a3fb7299d549e11cab89a0fd96341336e"} pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 02 11:50:21 crc kubenswrapper[4783]: I1002 11:50:21.514892 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" containerID="cri-o://d3853f8a29225709d658ac8d5245ea2a3fb7299d549e11cab89a0fd96341336e" gracePeriod=600 Oct 02 11:50:21 crc kubenswrapper[4783]: I1002 11:50:21.557743 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0bdb52b-31fd-4e16-98bc-3c9e65846bda" path="/var/lib/kubelet/pods/a0bdb52b-31fd-4e16-98bc-3c9e65846bda/volumes" Oct 02 11:50:21 crc kubenswrapper[4783]: I1002 11:50:21.931172 4783 generic.go:334] "Generic (PLEG): container finished" podID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerID="d3853f8a29225709d658ac8d5245ea2a3fb7299d549e11cab89a0fd96341336e" exitCode=0 Oct 02 11:50:21 crc kubenswrapper[4783]: I1002 11:50:21.931325 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" event={"ID":"3288cc82-59a8-408e-8b0e-b5255882b4fb","Type":"ContainerDied","Data":"d3853f8a29225709d658ac8d5245ea2a3fb7299d549e11cab89a0fd96341336e"} Oct 02 11:50:21 crc kubenswrapper[4783]: I1002 11:50:21.932329 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" event={"ID":"3288cc82-59a8-408e-8b0e-b5255882b4fb","Type":"ContainerStarted","Data":"7b64efa2db7d6a24365c13a8d7c58ab48d29750790f46655c264b5faf59edaa3"} Oct 02 11:50:21 crc kubenswrapper[4783]: I1002 11:50:21.932362 4783 scope.go:117] "RemoveContainer" containerID="16e46e78a08cfb5a1610f0efb729bfc7f610a79babf01966444bacf916b3acb9" Oct 02 11:50:24 crc kubenswrapper[4783]: I1002 11:50:24.093268 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-4wztk"] Oct 02 11:50:24 crc kubenswrapper[4783]: E1002 11:50:24.094059 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="22fa4069-4270-4316-b768-71ea003f7e9f" containerName="extract-utilities" Oct 02 11:50:24 crc kubenswrapper[4783]: I1002 11:50:24.094071 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="22fa4069-4270-4316-b768-71ea003f7e9f" containerName="extract-utilities" Oct 02 11:50:24 crc kubenswrapper[4783]: E1002 11:50:24.094084 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="22fa4069-4270-4316-b768-71ea003f7e9f" containerName="registry-server" Oct 02 11:50:24 crc kubenswrapper[4783]: I1002 11:50:24.094090 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="22fa4069-4270-4316-b768-71ea003f7e9f" containerName="registry-server" Oct 02 11:50:24 crc kubenswrapper[4783]: E1002 11:50:24.094099 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0bdb52b-31fd-4e16-98bc-3c9e65846bda" containerName="registry-server" Oct 02 11:50:24 crc kubenswrapper[4783]: I1002 11:50:24.094105 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0bdb52b-31fd-4e16-98bc-3c9e65846bda" containerName="registry-server" Oct 02 11:50:24 crc kubenswrapper[4783]: E1002 11:50:24.094120 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="22fa4069-4270-4316-b768-71ea003f7e9f" containerName="extract-content" Oct 02 11:50:24 crc kubenswrapper[4783]: I1002 11:50:24.094126 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="22fa4069-4270-4316-b768-71ea003f7e9f" containerName="extract-content" Oct 02 11:50:24 crc kubenswrapper[4783]: E1002 11:50:24.094136 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0bdb52b-31fd-4e16-98bc-3c9e65846bda" containerName="extract-utilities" Oct 02 11:50:24 crc kubenswrapper[4783]: I1002 11:50:24.094141 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0bdb52b-31fd-4e16-98bc-3c9e65846bda" containerName="extract-utilities" Oct 02 11:50:24 crc kubenswrapper[4783]: E1002 11:50:24.094157 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a0bdb52b-31fd-4e16-98bc-3c9e65846bda" containerName="extract-content" Oct 02 11:50:24 crc kubenswrapper[4783]: I1002 11:50:24.094163 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="a0bdb52b-31fd-4e16-98bc-3c9e65846bda" containerName="extract-content" Oct 02 11:50:24 crc kubenswrapper[4783]: I1002 11:50:24.094358 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="22fa4069-4270-4316-b768-71ea003f7e9f" containerName="registry-server" Oct 02 11:50:24 crc kubenswrapper[4783]: I1002 
11:50:24.094377 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="a0bdb52b-31fd-4e16-98bc-3c9e65846bda" containerName="registry-server" Oct 02 11:50:24 crc kubenswrapper[4783]: I1002 11:50:24.095712 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4wztk" Oct 02 11:50:24 crc kubenswrapper[4783]: I1002 11:50:24.108469 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4wztk"] Oct 02 11:50:24 crc kubenswrapper[4783]: I1002 11:50:24.165391 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7-utilities\") pod \"community-operators-4wztk\" (UID: \"67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7\") " pod="openshift-marketplace/community-operators-4wztk" Oct 02 11:50:24 crc kubenswrapper[4783]: I1002 11:50:24.165544 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ccvqc\" (UniqueName: \"kubernetes.io/projected/67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7-kube-api-access-ccvqc\") pod \"community-operators-4wztk\" (UID: \"67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7\") " pod="openshift-marketplace/community-operators-4wztk" Oct 02 11:50:24 crc kubenswrapper[4783]: I1002 11:50:24.165567 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7-catalog-content\") pod \"community-operators-4wztk\" (UID: \"67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7\") " pod="openshift-marketplace/community-operators-4wztk" Oct 02 11:50:24 crc kubenswrapper[4783]: I1002 11:50:24.267110 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ccvqc\" (UniqueName: \"kubernetes.io/projected/67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7-kube-api-access-ccvqc\") pod \"community-operators-4wztk\" (UID: \"67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7\") " pod="openshift-marketplace/community-operators-4wztk" Oct 02 11:50:24 crc kubenswrapper[4783]: I1002 11:50:24.267160 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7-catalog-content\") pod \"community-operators-4wztk\" (UID: \"67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7\") " pod="openshift-marketplace/community-operators-4wztk" Oct 02 11:50:24 crc kubenswrapper[4783]: I1002 11:50:24.267367 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7-utilities\") pod \"community-operators-4wztk\" (UID: \"67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7\") " pod="openshift-marketplace/community-operators-4wztk" Oct 02 11:50:24 crc kubenswrapper[4783]: I1002 11:50:24.267864 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7-utilities\") pod \"community-operators-4wztk\" (UID: \"67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7\") " pod="openshift-marketplace/community-operators-4wztk" Oct 02 11:50:24 crc kubenswrapper[4783]: I1002 11:50:24.267869 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7-catalog-content\") pod \"community-operators-4wztk\" (UID: \"67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7\") " pod="openshift-marketplace/community-operators-4wztk" Oct 02 11:50:24 crc kubenswrapper[4783]: I1002 11:50:24.293969 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ccvqc\" (UniqueName: \"kubernetes.io/projected/67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7-kube-api-access-ccvqc\") pod \"community-operators-4wztk\" (UID: \"67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7\") " pod="openshift-marketplace/community-operators-4wztk" Oct 02 11:50:24 crc kubenswrapper[4783]: I1002 11:50:24.411888 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4wztk" Oct 02 11:50:24 crc kubenswrapper[4783]: I1002 11:50:24.928051 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-4wztk"] Oct 02 11:50:24 crc kubenswrapper[4783]: W1002 11:50:24.931770 4783 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod67ed3cf2_fb7b_45a3_89ac_7bb9b22ae5f7.slice/crio-6f3ddab548919285139ec6c253f0fb771c046aefc8954b72a702a2735de140cb WatchSource:0}: Error finding container 6f3ddab548919285139ec6c253f0fb771c046aefc8954b72a702a2735de140cb: Status 404 returned error can't find the container with id 6f3ddab548919285139ec6c253f0fb771c046aefc8954b72a702a2735de140cb Oct 02 11:50:24 crc kubenswrapper[4783]: I1002 11:50:24.965376 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4wztk" event={"ID":"67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7","Type":"ContainerStarted","Data":"6f3ddab548919285139ec6c253f0fb771c046aefc8954b72a702a2735de140cb"} Oct 02 11:50:25 crc kubenswrapper[4783]: I1002 11:50:25.975612 4783 generic.go:334] "Generic (PLEG): container finished" podID="67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7" containerID="c02c9059f50a7ba0f79e20e9dc77f8030106bdd731866b3f482b33e7b0b0b319" exitCode=0 Oct 02 11:50:25 crc kubenswrapper[4783]: I1002 11:50:25.975713 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4wztk" event={"ID":"67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7","Type":"ContainerDied","Data":"c02c9059f50a7ba0f79e20e9dc77f8030106bdd731866b3f482b33e7b0b0b319"} Oct 02 11:50:28 crc kubenswrapper[4783]: I1002 11:50:28.008925 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4wztk" event={"ID":"67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7","Type":"ContainerStarted","Data":"5fc007ccfcfa10adece4411ce8ee3dfad28c9125b136bb6b3318e312a65dda6c"} Oct 02 11:50:29 crc kubenswrapper[4783]: I1002 11:50:29.019063 4783 generic.go:334] "Generic (PLEG): container finished" podID="67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7" containerID="5fc007ccfcfa10adece4411ce8ee3dfad28c9125b136bb6b3318e312a65dda6c" exitCode=0 Oct 02 11:50:29 crc kubenswrapper[4783]: I1002 11:50:29.019160 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4wztk" event={"ID":"67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7","Type":"ContainerDied","Data":"5fc007ccfcfa10adece4411ce8ee3dfad28c9125b136bb6b3318e312a65dda6c"} Oct 02 11:50:30 crc kubenswrapper[4783]: I1002 11:50:30.030165 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4wztk" 
event={"ID":"67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7","Type":"ContainerStarted","Data":"d5a190fd6a9782b1dbfbe0c7994ff272a974bbf7619da0fd73b651daf7c5d3b6"} Oct 02 11:50:30 crc kubenswrapper[4783]: I1002 11:50:30.052883 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-4wztk" podStartSLOduration=2.292519532 podStartE2EDuration="6.052858189s" podCreationTimestamp="2025-10-02 11:50:24 +0000 UTC" firstStartedPulling="2025-10-02 11:50:25.979171511 +0000 UTC m=+3459.295365772" lastFinishedPulling="2025-10-02 11:50:29.739510118 +0000 UTC m=+3463.055704429" observedRunningTime="2025-10-02 11:50:30.04661973 +0000 UTC m=+3463.362814001" watchObservedRunningTime="2025-10-02 11:50:30.052858189 +0000 UTC m=+3463.369052450" Oct 02 11:50:34 crc kubenswrapper[4783]: I1002 11:50:34.412431 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-4wztk" Oct 02 11:50:34 crc kubenswrapper[4783]: I1002 11:50:34.412860 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-4wztk" Oct 02 11:50:34 crc kubenswrapper[4783]: I1002 11:50:34.458478 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-4wztk" Oct 02 11:50:35 crc kubenswrapper[4783]: I1002 11:50:35.118398 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-4wztk" Oct 02 11:50:35 crc kubenswrapper[4783]: I1002 11:50:35.167658 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4wztk"] Oct 02 11:50:37 crc kubenswrapper[4783]: I1002 11:50:37.087318 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-4wztk" podUID="67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7" containerName="registry-server" containerID="cri-o://d5a190fd6a9782b1dbfbe0c7994ff272a974bbf7619da0fd73b651daf7c5d3b6" gracePeriod=2 Oct 02 11:50:37 crc kubenswrapper[4783]: I1002 11:50:37.529811 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-4wztk" Oct 02 11:50:37 crc kubenswrapper[4783]: I1002 11:50:37.709929 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7-utilities\") pod \"67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7\" (UID: \"67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7\") " Oct 02 11:50:37 crc kubenswrapper[4783]: I1002 11:50:37.710447 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ccvqc\" (UniqueName: \"kubernetes.io/projected/67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7-kube-api-access-ccvqc\") pod \"67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7\" (UID: \"67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7\") " Oct 02 11:50:37 crc kubenswrapper[4783]: I1002 11:50:37.710721 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7-catalog-content\") pod \"67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7\" (UID: \"67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7\") " Oct 02 11:50:37 crc kubenswrapper[4783]: I1002 11:50:37.711609 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7-utilities" (OuterVolumeSpecName: "utilities") pod "67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7" (UID: "67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:50:37 crc kubenswrapper[4783]: I1002 11:50:37.717375 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7-kube-api-access-ccvqc" (OuterVolumeSpecName: "kube-api-access-ccvqc") pod "67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7" (UID: "67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7"). InnerVolumeSpecName "kube-api-access-ccvqc". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:50:37 crc kubenswrapper[4783]: I1002 11:50:37.767518 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7" (UID: "67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:50:37 crc kubenswrapper[4783]: I1002 11:50:37.813086 4783 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 02 11:50:37 crc kubenswrapper[4783]: I1002 11:50:37.813124 4783 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7-utilities\") on node \"crc\" DevicePath \"\"" Oct 02 11:50:37 crc kubenswrapper[4783]: I1002 11:50:37.813134 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ccvqc\" (UniqueName: \"kubernetes.io/projected/67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7-kube-api-access-ccvqc\") on node \"crc\" DevicePath \"\"" Oct 02 11:50:38 crc kubenswrapper[4783]: I1002 11:50:38.103154 4783 generic.go:334] "Generic (PLEG): container finished" podID="67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7" containerID="d5a190fd6a9782b1dbfbe0c7994ff272a974bbf7619da0fd73b651daf7c5d3b6" exitCode=0 Oct 02 11:50:38 crc kubenswrapper[4783]: I1002 11:50:38.103544 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-4wztk" Oct 02 11:50:38 crc kubenswrapper[4783]: I1002 11:50:38.104699 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4wztk" event={"ID":"67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7","Type":"ContainerDied","Data":"d5a190fd6a9782b1dbfbe0c7994ff272a974bbf7619da0fd73b651daf7c5d3b6"} Oct 02 11:50:38 crc kubenswrapper[4783]: I1002 11:50:38.104798 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-4wztk" event={"ID":"67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7","Type":"ContainerDied","Data":"6f3ddab548919285139ec6c253f0fb771c046aefc8954b72a702a2735de140cb"} Oct 02 11:50:38 crc kubenswrapper[4783]: I1002 11:50:38.104859 4783 scope.go:117] "RemoveContainer" containerID="d5a190fd6a9782b1dbfbe0c7994ff272a974bbf7619da0fd73b651daf7c5d3b6" Oct 02 11:50:38 crc kubenswrapper[4783]: I1002 11:50:38.136868 4783 scope.go:117] "RemoveContainer" containerID="5fc007ccfcfa10adece4411ce8ee3dfad28c9125b136bb6b3318e312a65dda6c" Oct 02 11:50:38 crc kubenswrapper[4783]: I1002 11:50:38.137676 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-4wztk"] Oct 02 11:50:38 crc kubenswrapper[4783]: I1002 11:50:38.144593 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-4wztk"] Oct 02 11:50:38 crc kubenswrapper[4783]: I1002 11:50:38.158061 4783 scope.go:117] "RemoveContainer" containerID="c02c9059f50a7ba0f79e20e9dc77f8030106bdd731866b3f482b33e7b0b0b319" Oct 02 11:50:38 crc kubenswrapper[4783]: I1002 11:50:38.200787 4783 scope.go:117] "RemoveContainer" containerID="d5a190fd6a9782b1dbfbe0c7994ff272a974bbf7619da0fd73b651daf7c5d3b6" Oct 02 11:50:38 crc kubenswrapper[4783]: E1002 11:50:38.201186 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d5a190fd6a9782b1dbfbe0c7994ff272a974bbf7619da0fd73b651daf7c5d3b6\": container with ID starting with d5a190fd6a9782b1dbfbe0c7994ff272a974bbf7619da0fd73b651daf7c5d3b6 not found: ID does not exist" containerID="d5a190fd6a9782b1dbfbe0c7994ff272a974bbf7619da0fd73b651daf7c5d3b6" Oct 02 11:50:38 crc kubenswrapper[4783]: I1002 11:50:38.201224 
4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d5a190fd6a9782b1dbfbe0c7994ff272a974bbf7619da0fd73b651daf7c5d3b6"} err="failed to get container status \"d5a190fd6a9782b1dbfbe0c7994ff272a974bbf7619da0fd73b651daf7c5d3b6\": rpc error: code = NotFound desc = could not find container \"d5a190fd6a9782b1dbfbe0c7994ff272a974bbf7619da0fd73b651daf7c5d3b6\": container with ID starting with d5a190fd6a9782b1dbfbe0c7994ff272a974bbf7619da0fd73b651daf7c5d3b6 not found: ID does not exist" Oct 02 11:50:38 crc kubenswrapper[4783]: I1002 11:50:38.201250 4783 scope.go:117] "RemoveContainer" containerID="5fc007ccfcfa10adece4411ce8ee3dfad28c9125b136bb6b3318e312a65dda6c" Oct 02 11:50:38 crc kubenswrapper[4783]: E1002 11:50:38.202114 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5fc007ccfcfa10adece4411ce8ee3dfad28c9125b136bb6b3318e312a65dda6c\": container with ID starting with 5fc007ccfcfa10adece4411ce8ee3dfad28c9125b136bb6b3318e312a65dda6c not found: ID does not exist" containerID="5fc007ccfcfa10adece4411ce8ee3dfad28c9125b136bb6b3318e312a65dda6c" Oct 02 11:50:38 crc kubenswrapper[4783]: I1002 11:50:38.202204 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5fc007ccfcfa10adece4411ce8ee3dfad28c9125b136bb6b3318e312a65dda6c"} err="failed to get container status \"5fc007ccfcfa10adece4411ce8ee3dfad28c9125b136bb6b3318e312a65dda6c\": rpc error: code = NotFound desc = could not find container \"5fc007ccfcfa10adece4411ce8ee3dfad28c9125b136bb6b3318e312a65dda6c\": container with ID starting with 5fc007ccfcfa10adece4411ce8ee3dfad28c9125b136bb6b3318e312a65dda6c not found: ID does not exist" Oct 02 11:50:38 crc kubenswrapper[4783]: I1002 11:50:38.202271 4783 scope.go:117] "RemoveContainer" containerID="c02c9059f50a7ba0f79e20e9dc77f8030106bdd731866b3f482b33e7b0b0b319" Oct 02 11:50:38 crc kubenswrapper[4783]: E1002 11:50:38.202718 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c02c9059f50a7ba0f79e20e9dc77f8030106bdd731866b3f482b33e7b0b0b319\": container with ID starting with c02c9059f50a7ba0f79e20e9dc77f8030106bdd731866b3f482b33e7b0b0b319 not found: ID does not exist" containerID="c02c9059f50a7ba0f79e20e9dc77f8030106bdd731866b3f482b33e7b0b0b319" Oct 02 11:50:38 crc kubenswrapper[4783]: I1002 11:50:38.202756 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c02c9059f50a7ba0f79e20e9dc77f8030106bdd731866b3f482b33e7b0b0b319"} err="failed to get container status \"c02c9059f50a7ba0f79e20e9dc77f8030106bdd731866b3f482b33e7b0b0b319\": rpc error: code = NotFound desc = could not find container \"c02c9059f50a7ba0f79e20e9dc77f8030106bdd731866b3f482b33e7b0b0b319\": container with ID starting with c02c9059f50a7ba0f79e20e9dc77f8030106bdd731866b3f482b33e7b0b0b319 not found: ID does not exist" Oct 02 11:50:39 crc kubenswrapper[4783]: I1002 11:50:39.559717 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7" path="/var/lib/kubelet/pods/67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7/volumes" Oct 02 11:52:21 crc kubenswrapper[4783]: I1002 11:52:21.513583 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Oct 02 11:52:21 crc kubenswrapper[4783]: I1002 11:52:21.514203 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 02 11:52:32 crc kubenswrapper[4783]: I1002 11:52:32.027078 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zjr"] Oct 02 11:52:32 crc kubenswrapper[4783]: E1002 11:52:32.028075 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7" containerName="extract-content" Oct 02 11:52:32 crc kubenswrapper[4783]: I1002 11:52:32.028094 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7" containerName="extract-content" Oct 02 11:52:32 crc kubenswrapper[4783]: E1002 11:52:32.028118 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7" containerName="registry-server" Oct 02 11:52:32 crc kubenswrapper[4783]: I1002 11:52:32.028126 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7" containerName="registry-server" Oct 02 11:52:32 crc kubenswrapper[4783]: E1002 11:52:32.028138 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7" containerName="extract-utilities" Oct 02 11:52:32 crc kubenswrapper[4783]: I1002 11:52:32.028147 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7" containerName="extract-utilities" Oct 02 11:52:32 crc kubenswrapper[4783]: I1002 11:52:32.028375 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="67ed3cf2-fb7b-45a3-89ac-7bb9b22ae5f7" containerName="registry-server" Oct 02 11:52:32 crc kubenswrapper[4783]: I1002 11:52:32.029391 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zjr" Oct 02 11:52:32 crc kubenswrapper[4783]: I1002 11:52:32.031392 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 02 11:52:32 crc kubenswrapper[4783]: I1002 11:52:32.031485 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 02 11:52:32 crc kubenswrapper[4783]: I1002 11:52:32.031517 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-extra-config" Oct 02 11:52:32 crc kubenswrapper[4783]: I1002 11:52:32.031517 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 02 11:52:32 crc kubenswrapper[4783]: I1002 11:52:32.032551 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key" Oct 02 11:52:32 crc kubenswrapper[4783]: I1002 11:52:32.032617 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-n5lmz" Oct 02 11:52:32 crc kubenswrapper[4783]: I1002 11:52:32.035610 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config" Oct 02 11:52:32 crc kubenswrapper[4783]: I1002 11:52:32.052338 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zjr"] Oct 02 11:52:32 crc kubenswrapper[4783]: I1002 11:52:32.140014 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3bb46b91-0791-4807-bcb8-20324d854a41-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zjr\" (UID: \"3bb46b91-0791-4807-bcb8-20324d854a41\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zjr" Oct 02 11:52:32 crc kubenswrapper[4783]: I1002 11:52:32.140090 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/3bb46b91-0791-4807-bcb8-20324d854a41-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zjr\" (UID: \"3bb46b91-0791-4807-bcb8-20324d854a41\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zjr" Oct 02 11:52:32 crc kubenswrapper[4783]: I1002 11:52:32.140152 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/3bb46b91-0791-4807-bcb8-20324d854a41-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zjr\" (UID: \"3bb46b91-0791-4807-bcb8-20324d854a41\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zjr" Oct 02 11:52:32 crc kubenswrapper[4783]: I1002 11:52:32.140190 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/3bb46b91-0791-4807-bcb8-20324d854a41-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zjr\" (UID: \"3bb46b91-0791-4807-bcb8-20324d854a41\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zjr" Oct 02 11:52:32 crc kubenswrapper[4783]: I1002 11:52:32.140220 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: 
\"kubernetes.io/secret/3bb46b91-0791-4807-bcb8-20324d854a41-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zjr\" (UID: \"3bb46b91-0791-4807-bcb8-20324d854a41\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zjr" Oct 02 11:52:32 crc kubenswrapper[4783]: I1002 11:52:32.140308 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t7gwc\" (UniqueName: \"kubernetes.io/projected/3bb46b91-0791-4807-bcb8-20324d854a41-kube-api-access-t7gwc\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zjr\" (UID: \"3bb46b91-0791-4807-bcb8-20324d854a41\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zjr" Oct 02 11:52:32 crc kubenswrapper[4783]: I1002 11:52:32.140405 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/3bb46b91-0791-4807-bcb8-20324d854a41-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zjr\" (UID: \"3bb46b91-0791-4807-bcb8-20324d854a41\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zjr" Oct 02 11:52:32 crc kubenswrapper[4783]: I1002 11:52:32.140476 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3bb46b91-0791-4807-bcb8-20324d854a41-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zjr\" (UID: \"3bb46b91-0791-4807-bcb8-20324d854a41\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zjr" Oct 02 11:52:32 crc kubenswrapper[4783]: I1002 11:52:32.140504 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/3bb46b91-0791-4807-bcb8-20324d854a41-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zjr\" (UID: \"3bb46b91-0791-4807-bcb8-20324d854a41\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zjr" Oct 02 11:52:32 crc kubenswrapper[4783]: I1002 11:52:32.242982 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3bb46b91-0791-4807-bcb8-20324d854a41-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zjr\" (UID: \"3bb46b91-0791-4807-bcb8-20324d854a41\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zjr" Oct 02 11:52:32 crc kubenswrapper[4783]: I1002 11:52:32.243321 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/3bb46b91-0791-4807-bcb8-20324d854a41-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zjr\" (UID: \"3bb46b91-0791-4807-bcb8-20324d854a41\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zjr" Oct 02 11:52:32 crc kubenswrapper[4783]: I1002 11:52:32.244163 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/3bb46b91-0791-4807-bcb8-20324d854a41-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zjr\" (UID: \"3bb46b91-0791-4807-bcb8-20324d854a41\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zjr" Oct 02 11:52:32 crc kubenswrapper[4783]: I1002 11:52:32.244221 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-extra-config-0\" 
(UniqueName: \"kubernetes.io/configmap/3bb46b91-0791-4807-bcb8-20324d854a41-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zjr\" (UID: \"3bb46b91-0791-4807-bcb8-20324d854a41\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zjr" Oct 02 11:52:32 crc kubenswrapper[4783]: I1002 11:52:32.244262 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3bb46b91-0791-4807-bcb8-20324d854a41-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zjr\" (UID: \"3bb46b91-0791-4807-bcb8-20324d854a41\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zjr" Oct 02 11:52:32 crc kubenswrapper[4783]: I1002 11:52:32.244528 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t7gwc\" (UniqueName: \"kubernetes.io/projected/3bb46b91-0791-4807-bcb8-20324d854a41-kube-api-access-t7gwc\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zjr\" (UID: \"3bb46b91-0791-4807-bcb8-20324d854a41\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zjr" Oct 02 11:52:32 crc kubenswrapper[4783]: I1002 11:52:32.244790 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/3bb46b91-0791-4807-bcb8-20324d854a41-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zjr\" (UID: \"3bb46b91-0791-4807-bcb8-20324d854a41\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zjr" Oct 02 11:52:32 crc kubenswrapper[4783]: I1002 11:52:32.244880 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3bb46b91-0791-4807-bcb8-20324d854a41-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zjr\" (UID: \"3bb46b91-0791-4807-bcb8-20324d854a41\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zjr" Oct 02 11:52:32 crc kubenswrapper[4783]: I1002 11:52:32.244924 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/3bb46b91-0791-4807-bcb8-20324d854a41-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zjr\" (UID: \"3bb46b91-0791-4807-bcb8-20324d854a41\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zjr" Oct 02 11:52:32 crc kubenswrapper[4783]: I1002 11:52:32.245341 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/3bb46b91-0791-4807-bcb8-20324d854a41-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zjr\" (UID: \"3bb46b91-0791-4807-bcb8-20324d854a41\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zjr" Oct 02 11:52:32 crc kubenswrapper[4783]: I1002 11:52:32.253228 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3bb46b91-0791-4807-bcb8-20324d854a41-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zjr\" (UID: \"3bb46b91-0791-4807-bcb8-20324d854a41\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zjr" Oct 02 11:52:32 crc kubenswrapper[4783]: I1002 11:52:32.255008 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/3bb46b91-0791-4807-bcb8-20324d854a41-nova-cell1-compute-config-1\") pod 
\"nova-edpm-deployment-openstack-edpm-ipam-r4zjr\" (UID: \"3bb46b91-0791-4807-bcb8-20324d854a41\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zjr" Oct 02 11:52:32 crc kubenswrapper[4783]: I1002 11:52:32.255625 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3bb46b91-0791-4807-bcb8-20324d854a41-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zjr\" (UID: \"3bb46b91-0791-4807-bcb8-20324d854a41\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zjr" Oct 02 11:52:32 crc kubenswrapper[4783]: I1002 11:52:32.256980 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/3bb46b91-0791-4807-bcb8-20324d854a41-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zjr\" (UID: \"3bb46b91-0791-4807-bcb8-20324d854a41\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zjr" Oct 02 11:52:32 crc kubenswrapper[4783]: I1002 11:52:32.257520 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3bb46b91-0791-4807-bcb8-20324d854a41-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zjr\" (UID: \"3bb46b91-0791-4807-bcb8-20324d854a41\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zjr" Oct 02 11:52:32 crc kubenswrapper[4783]: I1002 11:52:32.271726 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/3bb46b91-0791-4807-bcb8-20324d854a41-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zjr\" (UID: \"3bb46b91-0791-4807-bcb8-20324d854a41\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zjr" Oct 02 11:52:32 crc kubenswrapper[4783]: I1002 11:52:32.277003 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/3bb46b91-0791-4807-bcb8-20324d854a41-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zjr\" (UID: \"3bb46b91-0791-4807-bcb8-20324d854a41\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zjr" Oct 02 11:52:32 crc kubenswrapper[4783]: I1002 11:52:32.284387 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t7gwc\" (UniqueName: \"kubernetes.io/projected/3bb46b91-0791-4807-bcb8-20324d854a41-kube-api-access-t7gwc\") pod \"nova-edpm-deployment-openstack-edpm-ipam-r4zjr\" (UID: \"3bb46b91-0791-4807-bcb8-20324d854a41\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zjr" Oct 02 11:52:32 crc kubenswrapper[4783]: I1002 11:52:32.350356 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zjr" Oct 02 11:52:32 crc kubenswrapper[4783]: I1002 11:52:32.896060 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zjr"] Oct 02 11:52:33 crc kubenswrapper[4783]: I1002 11:52:33.149902 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zjr" event={"ID":"3bb46b91-0791-4807-bcb8-20324d854a41","Type":"ContainerStarted","Data":"54ff9834d0cba1f22bea83deef52695b87babf665935aef2cf7aea7ed4ac2ebe"} Oct 02 11:52:34 crc kubenswrapper[4783]: I1002 11:52:34.159818 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zjr" event={"ID":"3bb46b91-0791-4807-bcb8-20324d854a41","Type":"ContainerStarted","Data":"59b4c1964f4878f01915601ddb45fc0ab7d578323b1c642e6ad4496fd54132b3"} Oct 02 11:52:51 crc kubenswrapper[4783]: I1002 11:52:51.514015 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 02 11:52:51 crc kubenswrapper[4783]: I1002 11:52:51.514668 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 02 11:53:21 crc kubenswrapper[4783]: I1002 11:53:21.514252 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 02 11:53:21 crc kubenswrapper[4783]: I1002 11:53:21.514936 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 02 11:53:21 crc kubenswrapper[4783]: I1002 11:53:21.514993 4783 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" Oct 02 11:53:21 crc kubenswrapper[4783]: I1002 11:53:21.515915 4783 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"7b64efa2db7d6a24365c13a8d7c58ab48d29750790f46655c264b5faf59edaa3"} pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 02 11:53:21 crc kubenswrapper[4783]: I1002 11:53:21.515991 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" containerID="cri-o://7b64efa2db7d6a24365c13a8d7c58ab48d29750790f46655c264b5faf59edaa3" gracePeriod=600 Oct 02 11:53:21 crc kubenswrapper[4783]: E1002 11:53:21.639614 4783 pod_workers.go:1301] 
"Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:53:22 crc kubenswrapper[4783]: I1002 11:53:22.619212 4783 generic.go:334] "Generic (PLEG): container finished" podID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerID="7b64efa2db7d6a24365c13a8d7c58ab48d29750790f46655c264b5faf59edaa3" exitCode=0 Oct 02 11:53:22 crc kubenswrapper[4783]: I1002 11:53:22.619279 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" event={"ID":"3288cc82-59a8-408e-8b0e-b5255882b4fb","Type":"ContainerDied","Data":"7b64efa2db7d6a24365c13a8d7c58ab48d29750790f46655c264b5faf59edaa3"} Oct 02 11:53:22 crc kubenswrapper[4783]: I1002 11:53:22.619561 4783 scope.go:117] "RemoveContainer" containerID="d3853f8a29225709d658ac8d5245ea2a3fb7299d549e11cab89a0fd96341336e" Oct 02 11:53:22 crc kubenswrapper[4783]: I1002 11:53:22.620243 4783 scope.go:117] "RemoveContainer" containerID="7b64efa2db7d6a24365c13a8d7c58ab48d29750790f46655c264b5faf59edaa3" Oct 02 11:53:22 crc kubenswrapper[4783]: E1002 11:53:22.620563 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:53:22 crc kubenswrapper[4783]: I1002 11:53:22.642400 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zjr" podStartSLOduration=50.267007631 podStartE2EDuration="50.642378885s" podCreationTimestamp="2025-10-02 11:52:32 +0000 UTC" firstStartedPulling="2025-10-02 11:52:32.898777847 +0000 UTC m=+3586.214972128" lastFinishedPulling="2025-10-02 11:52:33.274149121 +0000 UTC m=+3586.590343382" observedRunningTime="2025-10-02 11:52:34.191971482 +0000 UTC m=+3587.508165733" watchObservedRunningTime="2025-10-02 11:53:22.642378885 +0000 UTC m=+3635.958573146" Oct 02 11:53:36 crc kubenswrapper[4783]: I1002 11:53:36.545622 4783 scope.go:117] "RemoveContainer" containerID="7b64efa2db7d6a24365c13a8d7c58ab48d29750790f46655c264b5faf59edaa3" Oct 02 11:53:36 crc kubenswrapper[4783]: E1002 11:53:36.546488 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:53:46 crc kubenswrapper[4783]: I1002 11:53:46.832370 4783 generic.go:334] "Generic (PLEG): container finished" podID="3bb46b91-0791-4807-bcb8-20324d854a41" containerID="59b4c1964f4878f01915601ddb45fc0ab7d578323b1c642e6ad4496fd54132b3" exitCode=2 Oct 02 11:53:46 crc kubenswrapper[4783]: I1002 11:53:46.832455 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zjr" event={"ID":"3bb46b91-0791-4807-bcb8-20324d854a41","Type":"ContainerDied","Data":"59b4c1964f4878f01915601ddb45fc0ab7d578323b1c642e6ad4496fd54132b3"} Oct 02 11:53:48 crc kubenswrapper[4783]: I1002 11:53:48.217782 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zjr" Oct 02 11:53:48 crc kubenswrapper[4783]: I1002 11:53:48.255067 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3bb46b91-0791-4807-bcb8-20324d854a41-nova-combined-ca-bundle\") pod \"3bb46b91-0791-4807-bcb8-20324d854a41\" (UID: \"3bb46b91-0791-4807-bcb8-20324d854a41\") " Oct 02 11:53:48 crc kubenswrapper[4783]: I1002 11:53:48.255203 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/3bb46b91-0791-4807-bcb8-20324d854a41-nova-cell1-compute-config-0\") pod \"3bb46b91-0791-4807-bcb8-20324d854a41\" (UID: \"3bb46b91-0791-4807-bcb8-20324d854a41\") " Oct 02 11:53:48 crc kubenswrapper[4783]: I1002 11:53:48.255250 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/3bb46b91-0791-4807-bcb8-20324d854a41-nova-extra-config-0\") pod \"3bb46b91-0791-4807-bcb8-20324d854a41\" (UID: \"3bb46b91-0791-4807-bcb8-20324d854a41\") " Oct 02 11:53:48 crc kubenswrapper[4783]: I1002 11:53:48.255276 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/3bb46b91-0791-4807-bcb8-20324d854a41-nova-cell1-compute-config-1\") pod \"3bb46b91-0791-4807-bcb8-20324d854a41\" (UID: \"3bb46b91-0791-4807-bcb8-20324d854a41\") " Oct 02 11:53:48 crc kubenswrapper[4783]: I1002 11:53:48.255322 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-t7gwc\" (UniqueName: \"kubernetes.io/projected/3bb46b91-0791-4807-bcb8-20324d854a41-kube-api-access-t7gwc\") pod \"3bb46b91-0791-4807-bcb8-20324d854a41\" (UID: \"3bb46b91-0791-4807-bcb8-20324d854a41\") " Oct 02 11:53:48 crc kubenswrapper[4783]: I1002 11:53:48.255954 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3bb46b91-0791-4807-bcb8-20324d854a41-inventory\") pod \"3bb46b91-0791-4807-bcb8-20324d854a41\" (UID: \"3bb46b91-0791-4807-bcb8-20324d854a41\") " Oct 02 11:53:48 crc kubenswrapper[4783]: I1002 11:53:48.255991 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/3bb46b91-0791-4807-bcb8-20324d854a41-nova-migration-ssh-key-1\") pod \"3bb46b91-0791-4807-bcb8-20324d854a41\" (UID: \"3bb46b91-0791-4807-bcb8-20324d854a41\") " Oct 02 11:53:48 crc kubenswrapper[4783]: I1002 11:53:48.256025 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3bb46b91-0791-4807-bcb8-20324d854a41-ssh-key\") pod \"3bb46b91-0791-4807-bcb8-20324d854a41\" (UID: \"3bb46b91-0791-4807-bcb8-20324d854a41\") " Oct 02 11:53:48 crc kubenswrapper[4783]: I1002 11:53:48.256103 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: 
\"kubernetes.io/secret/3bb46b91-0791-4807-bcb8-20324d854a41-nova-migration-ssh-key-0\") pod \"3bb46b91-0791-4807-bcb8-20324d854a41\" (UID: \"3bb46b91-0791-4807-bcb8-20324d854a41\") " Oct 02 11:53:48 crc kubenswrapper[4783]: I1002 11:53:48.270667 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3bb46b91-0791-4807-bcb8-20324d854a41-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "3bb46b91-0791-4807-bcb8-20324d854a41" (UID: "3bb46b91-0791-4807-bcb8-20324d854a41"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:53:48 crc kubenswrapper[4783]: I1002 11:53:48.277585 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3bb46b91-0791-4807-bcb8-20324d854a41-kube-api-access-t7gwc" (OuterVolumeSpecName: "kube-api-access-t7gwc") pod "3bb46b91-0791-4807-bcb8-20324d854a41" (UID: "3bb46b91-0791-4807-bcb8-20324d854a41"). InnerVolumeSpecName "kube-api-access-t7gwc". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:53:48 crc kubenswrapper[4783]: I1002 11:53:48.287818 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3bb46b91-0791-4807-bcb8-20324d854a41-nova-extra-config-0" (OuterVolumeSpecName: "nova-extra-config-0") pod "3bb46b91-0791-4807-bcb8-20324d854a41" (UID: "3bb46b91-0791-4807-bcb8-20324d854a41"). InnerVolumeSpecName "nova-extra-config-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 11:53:48 crc kubenswrapper[4783]: I1002 11:53:48.289722 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3bb46b91-0791-4807-bcb8-20324d854a41-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "3bb46b91-0791-4807-bcb8-20324d854a41" (UID: "3bb46b91-0791-4807-bcb8-20324d854a41"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:53:48 crc kubenswrapper[4783]: I1002 11:53:48.290774 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3bb46b91-0791-4807-bcb8-20324d854a41-inventory" (OuterVolumeSpecName: "inventory") pod "3bb46b91-0791-4807-bcb8-20324d854a41" (UID: "3bb46b91-0791-4807-bcb8-20324d854a41"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:53:48 crc kubenswrapper[4783]: I1002 11:53:48.295263 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3bb46b91-0791-4807-bcb8-20324d854a41-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "3bb46b91-0791-4807-bcb8-20324d854a41" (UID: "3bb46b91-0791-4807-bcb8-20324d854a41"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:53:48 crc kubenswrapper[4783]: I1002 11:53:48.297599 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3bb46b91-0791-4807-bcb8-20324d854a41-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "3bb46b91-0791-4807-bcb8-20324d854a41" (UID: "3bb46b91-0791-4807-bcb8-20324d854a41"). InnerVolumeSpecName "nova-migration-ssh-key-1". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:53:48 crc kubenswrapper[4783]: I1002 11:53:48.307353 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3bb46b91-0791-4807-bcb8-20324d854a41-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "3bb46b91-0791-4807-bcb8-20324d854a41" (UID: "3bb46b91-0791-4807-bcb8-20324d854a41"). InnerVolumeSpecName "nova-migration-ssh-key-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:53:48 crc kubenswrapper[4783]: I1002 11:53:48.307737 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3bb46b91-0791-4807-bcb8-20324d854a41-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "3bb46b91-0791-4807-bcb8-20324d854a41" (UID: "3bb46b91-0791-4807-bcb8-20324d854a41"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 11:53:48 crc kubenswrapper[4783]: I1002 11:53:48.357926 4783 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/3bb46b91-0791-4807-bcb8-20324d854a41-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Oct 02 11:53:48 crc kubenswrapper[4783]: I1002 11:53:48.357955 4783 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3bb46b91-0791-4807-bcb8-20324d854a41-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 11:53:48 crc kubenswrapper[4783]: I1002 11:53:48.357965 4783 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/3bb46b91-0791-4807-bcb8-20324d854a41-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Oct 02 11:53:48 crc kubenswrapper[4783]: I1002 11:53:48.357974 4783 reconciler_common.go:293] "Volume detached for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/3bb46b91-0791-4807-bcb8-20324d854a41-nova-extra-config-0\") on node \"crc\" DevicePath \"\"" Oct 02 11:53:48 crc kubenswrapper[4783]: I1002 11:53:48.357982 4783 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/3bb46b91-0791-4807-bcb8-20324d854a41-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Oct 02 11:53:48 crc kubenswrapper[4783]: I1002 11:53:48.357991 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-t7gwc\" (UniqueName: \"kubernetes.io/projected/3bb46b91-0791-4807-bcb8-20324d854a41-kube-api-access-t7gwc\") on node \"crc\" DevicePath \"\"" Oct 02 11:53:48 crc kubenswrapper[4783]: I1002 11:53:48.358001 4783 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/3bb46b91-0791-4807-bcb8-20324d854a41-inventory\") on node \"crc\" DevicePath \"\"" Oct 02 11:53:48 crc kubenswrapper[4783]: I1002 11:53:48.358010 4783 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/3bb46b91-0791-4807-bcb8-20324d854a41-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Oct 02 11:53:48 crc kubenswrapper[4783]: I1002 11:53:48.358325 4783 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/3bb46b91-0791-4807-bcb8-20324d854a41-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 02 11:53:48 crc kubenswrapper[4783]: I1002 11:53:48.545758 4783 scope.go:117] "RemoveContainer" 
containerID="7b64efa2db7d6a24365c13a8d7c58ab48d29750790f46655c264b5faf59edaa3" Oct 02 11:53:48 crc kubenswrapper[4783]: E1002 11:53:48.546132 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:53:48 crc kubenswrapper[4783]: I1002 11:53:48.851508 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zjr" event={"ID":"3bb46b91-0791-4807-bcb8-20324d854a41","Type":"ContainerDied","Data":"54ff9834d0cba1f22bea83deef52695b87babf665935aef2cf7aea7ed4ac2ebe"} Oct 02 11:53:48 crc kubenswrapper[4783]: I1002 11:53:48.851550 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="54ff9834d0cba1f22bea83deef52695b87babf665935aef2cf7aea7ed4ac2ebe" Oct 02 11:53:48 crc kubenswrapper[4783]: I1002 11:53:48.851552 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-r4zjr" Oct 02 11:54:03 crc kubenswrapper[4783]: I1002 11:54:03.544795 4783 scope.go:117] "RemoveContainer" containerID="7b64efa2db7d6a24365c13a8d7c58ab48d29750790f46655c264b5faf59edaa3" Oct 02 11:54:03 crc kubenswrapper[4783]: E1002 11:54:03.545793 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:54:17 crc kubenswrapper[4783]: I1002 11:54:17.551622 4783 scope.go:117] "RemoveContainer" containerID="7b64efa2db7d6a24365c13a8d7c58ab48d29750790f46655c264b5faf59edaa3" Oct 02 11:54:17 crc kubenswrapper[4783]: E1002 11:54:17.555428 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:54:19 crc kubenswrapper[4783]: I1002 11:54:19.977766 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-j24ql"] Oct 02 11:54:19 crc kubenswrapper[4783]: E1002 11:54:19.978537 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3bb46b91-0791-4807-bcb8-20324d854a41" containerName="nova-edpm-deployment-openstack-edpm-ipam" Oct 02 11:54:19 crc kubenswrapper[4783]: I1002 11:54:19.978550 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="3bb46b91-0791-4807-bcb8-20324d854a41" containerName="nova-edpm-deployment-openstack-edpm-ipam" Oct 02 11:54:19 crc kubenswrapper[4783]: I1002 11:54:19.978740 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="3bb46b91-0791-4807-bcb8-20324d854a41" containerName="nova-edpm-deployment-openstack-edpm-ipam" Oct 02 11:54:19 crc 
kubenswrapper[4783]: I1002 11:54:19.980094 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-j24ql" Oct 02 11:54:19 crc kubenswrapper[4783]: I1002 11:54:19.992106 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-j24ql"] Oct 02 11:54:20 crc kubenswrapper[4783]: I1002 11:54:20.142801 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tq4j9\" (UniqueName: \"kubernetes.io/projected/fdf3e91b-4f36-4a7c-a900-ba7beb9352f9-kube-api-access-tq4j9\") pod \"certified-operators-j24ql\" (UID: \"fdf3e91b-4f36-4a7c-a900-ba7beb9352f9\") " pod="openshift-marketplace/certified-operators-j24ql" Oct 02 11:54:20 crc kubenswrapper[4783]: I1002 11:54:20.142862 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fdf3e91b-4f36-4a7c-a900-ba7beb9352f9-catalog-content\") pod \"certified-operators-j24ql\" (UID: \"fdf3e91b-4f36-4a7c-a900-ba7beb9352f9\") " pod="openshift-marketplace/certified-operators-j24ql" Oct 02 11:54:20 crc kubenswrapper[4783]: I1002 11:54:20.142956 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fdf3e91b-4f36-4a7c-a900-ba7beb9352f9-utilities\") pod \"certified-operators-j24ql\" (UID: \"fdf3e91b-4f36-4a7c-a900-ba7beb9352f9\") " pod="openshift-marketplace/certified-operators-j24ql" Oct 02 11:54:20 crc kubenswrapper[4783]: I1002 11:54:20.244884 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tq4j9\" (UniqueName: \"kubernetes.io/projected/fdf3e91b-4f36-4a7c-a900-ba7beb9352f9-kube-api-access-tq4j9\") pod \"certified-operators-j24ql\" (UID: \"fdf3e91b-4f36-4a7c-a900-ba7beb9352f9\") " pod="openshift-marketplace/certified-operators-j24ql" Oct 02 11:54:20 crc kubenswrapper[4783]: I1002 11:54:20.245220 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fdf3e91b-4f36-4a7c-a900-ba7beb9352f9-catalog-content\") pod \"certified-operators-j24ql\" (UID: \"fdf3e91b-4f36-4a7c-a900-ba7beb9352f9\") " pod="openshift-marketplace/certified-operators-j24ql" Oct 02 11:54:20 crc kubenswrapper[4783]: I1002 11:54:20.245442 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fdf3e91b-4f36-4a7c-a900-ba7beb9352f9-utilities\") pod \"certified-operators-j24ql\" (UID: \"fdf3e91b-4f36-4a7c-a900-ba7beb9352f9\") " pod="openshift-marketplace/certified-operators-j24ql" Oct 02 11:54:20 crc kubenswrapper[4783]: I1002 11:54:20.245787 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fdf3e91b-4f36-4a7c-a900-ba7beb9352f9-catalog-content\") pod \"certified-operators-j24ql\" (UID: \"fdf3e91b-4f36-4a7c-a900-ba7beb9352f9\") " pod="openshift-marketplace/certified-operators-j24ql" Oct 02 11:54:20 crc kubenswrapper[4783]: I1002 11:54:20.245981 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fdf3e91b-4f36-4a7c-a900-ba7beb9352f9-utilities\") pod \"certified-operators-j24ql\" (UID: \"fdf3e91b-4f36-4a7c-a900-ba7beb9352f9\") " pod="openshift-marketplace/certified-operators-j24ql" Oct 
02 11:54:20 crc kubenswrapper[4783]: I1002 11:54:20.269296 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tq4j9\" (UniqueName: \"kubernetes.io/projected/fdf3e91b-4f36-4a7c-a900-ba7beb9352f9-kube-api-access-tq4j9\") pod \"certified-operators-j24ql\" (UID: \"fdf3e91b-4f36-4a7c-a900-ba7beb9352f9\") " pod="openshift-marketplace/certified-operators-j24ql" Oct 02 11:54:20 crc kubenswrapper[4783]: I1002 11:54:20.312808 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-j24ql" Oct 02 11:54:20 crc kubenswrapper[4783]: I1002 11:54:20.888984 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-j24ql"] Oct 02 11:54:21 crc kubenswrapper[4783]: I1002 11:54:21.141808 4783 generic.go:334] "Generic (PLEG): container finished" podID="fdf3e91b-4f36-4a7c-a900-ba7beb9352f9" containerID="8789ff52f3a639423bc424282ae8389c68ed60b03f001bff080d567823227ada" exitCode=0 Oct 02 11:54:21 crc kubenswrapper[4783]: I1002 11:54:21.141883 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j24ql" event={"ID":"fdf3e91b-4f36-4a7c-a900-ba7beb9352f9","Type":"ContainerDied","Data":"8789ff52f3a639423bc424282ae8389c68ed60b03f001bff080d567823227ada"} Oct 02 11:54:21 crc kubenswrapper[4783]: I1002 11:54:21.143924 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j24ql" event={"ID":"fdf3e91b-4f36-4a7c-a900-ba7beb9352f9","Type":"ContainerStarted","Data":"e1d4853e71920940c9c3ea54d1e7b61ac67cd77b049558d419165ec29f43ce64"} Oct 02 11:54:23 crc kubenswrapper[4783]: I1002 11:54:23.165780 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j24ql" event={"ID":"fdf3e91b-4f36-4a7c-a900-ba7beb9352f9","Type":"ContainerStarted","Data":"d6dbf1dccd5e079f24f81019dd5bf7a6dce3c65948cf0dbe90d754a168126580"} Oct 02 11:54:24 crc kubenswrapper[4783]: I1002 11:54:24.180767 4783 generic.go:334] "Generic (PLEG): container finished" podID="fdf3e91b-4f36-4a7c-a900-ba7beb9352f9" containerID="d6dbf1dccd5e079f24f81019dd5bf7a6dce3c65948cf0dbe90d754a168126580" exitCode=0 Oct 02 11:54:24 crc kubenswrapper[4783]: I1002 11:54:24.180832 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j24ql" event={"ID":"fdf3e91b-4f36-4a7c-a900-ba7beb9352f9","Type":"ContainerDied","Data":"d6dbf1dccd5e079f24f81019dd5bf7a6dce3c65948cf0dbe90d754a168126580"} Oct 02 11:54:25 crc kubenswrapper[4783]: I1002 11:54:25.190330 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j24ql" event={"ID":"fdf3e91b-4f36-4a7c-a900-ba7beb9352f9","Type":"ContainerStarted","Data":"8aaa98401518048d1497ccb0d484d494e9d2a611870b9513756e005577c63fd3"} Oct 02 11:54:25 crc kubenswrapper[4783]: I1002 11:54:25.214111 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-j24ql" podStartSLOduration=2.56658413 podStartE2EDuration="6.214090706s" podCreationTimestamp="2025-10-02 11:54:19 +0000 UTC" firstStartedPulling="2025-10-02 11:54:21.14380717 +0000 UTC m=+3694.460001431" lastFinishedPulling="2025-10-02 11:54:24.791313746 +0000 UTC m=+3698.107508007" observedRunningTime="2025-10-02 11:54:25.206177572 +0000 UTC m=+3698.522371833" watchObservedRunningTime="2025-10-02 11:54:25.214090706 +0000 UTC m=+3698.530284967" Oct 02 11:54:29 crc 
kubenswrapper[4783]: I1002 11:54:29.545556 4783 scope.go:117] "RemoveContainer" containerID="7b64efa2db7d6a24365c13a8d7c58ab48d29750790f46655c264b5faf59edaa3" Oct 02 11:54:29 crc kubenswrapper[4783]: E1002 11:54:29.546226 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:54:30 crc kubenswrapper[4783]: I1002 11:54:30.314391 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-j24ql" Oct 02 11:54:30 crc kubenswrapper[4783]: I1002 11:54:30.314487 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-j24ql" Oct 02 11:54:31 crc kubenswrapper[4783]: I1002 11:54:31.372499 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-j24ql" podUID="fdf3e91b-4f36-4a7c-a900-ba7beb9352f9" containerName="registry-server" probeResult="failure" output=< Oct 02 11:54:31 crc kubenswrapper[4783]: timeout: failed to connect service ":50051" within 1s Oct 02 11:54:31 crc kubenswrapper[4783]: > Oct 02 11:54:40 crc kubenswrapper[4783]: I1002 11:54:40.371771 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-j24ql" Oct 02 11:54:40 crc kubenswrapper[4783]: I1002 11:54:40.426483 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-j24ql" Oct 02 11:54:40 crc kubenswrapper[4783]: I1002 11:54:40.611978 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-j24ql"] Oct 02 11:54:42 crc kubenswrapper[4783]: I1002 11:54:42.338936 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-j24ql" podUID="fdf3e91b-4f36-4a7c-a900-ba7beb9352f9" containerName="registry-server" containerID="cri-o://8aaa98401518048d1497ccb0d484d494e9d2a611870b9513756e005577c63fd3" gracePeriod=2 Oct 02 11:54:42 crc kubenswrapper[4783]: I1002 11:54:42.784802 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-j24ql" Oct 02 11:54:42 crc kubenswrapper[4783]: I1002 11:54:42.968493 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fdf3e91b-4f36-4a7c-a900-ba7beb9352f9-utilities\") pod \"fdf3e91b-4f36-4a7c-a900-ba7beb9352f9\" (UID: \"fdf3e91b-4f36-4a7c-a900-ba7beb9352f9\") " Oct 02 11:54:42 crc kubenswrapper[4783]: I1002 11:54:42.968547 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fdf3e91b-4f36-4a7c-a900-ba7beb9352f9-catalog-content\") pod \"fdf3e91b-4f36-4a7c-a900-ba7beb9352f9\" (UID: \"fdf3e91b-4f36-4a7c-a900-ba7beb9352f9\") " Oct 02 11:54:42 crc kubenswrapper[4783]: I1002 11:54:42.968607 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tq4j9\" (UniqueName: \"kubernetes.io/projected/fdf3e91b-4f36-4a7c-a900-ba7beb9352f9-kube-api-access-tq4j9\") pod \"fdf3e91b-4f36-4a7c-a900-ba7beb9352f9\" (UID: \"fdf3e91b-4f36-4a7c-a900-ba7beb9352f9\") " Oct 02 11:54:42 crc kubenswrapper[4783]: I1002 11:54:42.969637 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fdf3e91b-4f36-4a7c-a900-ba7beb9352f9-utilities" (OuterVolumeSpecName: "utilities") pod "fdf3e91b-4f36-4a7c-a900-ba7beb9352f9" (UID: "fdf3e91b-4f36-4a7c-a900-ba7beb9352f9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:54:42 crc kubenswrapper[4783]: I1002 11:54:42.976084 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fdf3e91b-4f36-4a7c-a900-ba7beb9352f9-kube-api-access-tq4j9" (OuterVolumeSpecName: "kube-api-access-tq4j9") pod "fdf3e91b-4f36-4a7c-a900-ba7beb9352f9" (UID: "fdf3e91b-4f36-4a7c-a900-ba7beb9352f9"). InnerVolumeSpecName "kube-api-access-tq4j9". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 11:54:43 crc kubenswrapper[4783]: I1002 11:54:43.029296 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fdf3e91b-4f36-4a7c-a900-ba7beb9352f9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fdf3e91b-4f36-4a7c-a900-ba7beb9352f9" (UID: "fdf3e91b-4f36-4a7c-a900-ba7beb9352f9"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 11:54:43 crc kubenswrapper[4783]: I1002 11:54:43.070629 4783 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fdf3e91b-4f36-4a7c-a900-ba7beb9352f9-utilities\") on node \"crc\" DevicePath \"\"" Oct 02 11:54:43 crc kubenswrapper[4783]: I1002 11:54:43.070661 4783 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fdf3e91b-4f36-4a7c-a900-ba7beb9352f9-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 02 11:54:43 crc kubenswrapper[4783]: I1002 11:54:43.070676 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tq4j9\" (UniqueName: \"kubernetes.io/projected/fdf3e91b-4f36-4a7c-a900-ba7beb9352f9-kube-api-access-tq4j9\") on node \"crc\" DevicePath \"\"" Oct 02 11:54:43 crc kubenswrapper[4783]: I1002 11:54:43.353290 4783 generic.go:334] "Generic (PLEG): container finished" podID="fdf3e91b-4f36-4a7c-a900-ba7beb9352f9" containerID="8aaa98401518048d1497ccb0d484d494e9d2a611870b9513756e005577c63fd3" exitCode=0 Oct 02 11:54:43 crc kubenswrapper[4783]: I1002 11:54:43.353376 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j24ql" event={"ID":"fdf3e91b-4f36-4a7c-a900-ba7beb9352f9","Type":"ContainerDied","Data":"8aaa98401518048d1497ccb0d484d494e9d2a611870b9513756e005577c63fd3"} Oct 02 11:54:43 crc kubenswrapper[4783]: I1002 11:54:43.353491 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-j24ql" event={"ID":"fdf3e91b-4f36-4a7c-a900-ba7beb9352f9","Type":"ContainerDied","Data":"e1d4853e71920940c9c3ea54d1e7b61ac67cd77b049558d419165ec29f43ce64"} Oct 02 11:54:43 crc kubenswrapper[4783]: I1002 11:54:43.353526 4783 scope.go:117] "RemoveContainer" containerID="8aaa98401518048d1497ccb0d484d494e9d2a611870b9513756e005577c63fd3" Oct 02 11:54:43 crc kubenswrapper[4783]: I1002 11:54:43.353635 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-j24ql" Oct 02 11:54:43 crc kubenswrapper[4783]: I1002 11:54:43.386162 4783 scope.go:117] "RemoveContainer" containerID="d6dbf1dccd5e079f24f81019dd5bf7a6dce3c65948cf0dbe90d754a168126580" Oct 02 11:54:43 crc kubenswrapper[4783]: I1002 11:54:43.388532 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-j24ql"] Oct 02 11:54:43 crc kubenswrapper[4783]: I1002 11:54:43.399103 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-j24ql"] Oct 02 11:54:43 crc kubenswrapper[4783]: I1002 11:54:43.410545 4783 scope.go:117] "RemoveContainer" containerID="8789ff52f3a639423bc424282ae8389c68ed60b03f001bff080d567823227ada" Oct 02 11:54:43 crc kubenswrapper[4783]: I1002 11:54:43.455279 4783 scope.go:117] "RemoveContainer" containerID="8aaa98401518048d1497ccb0d484d494e9d2a611870b9513756e005577c63fd3" Oct 02 11:54:43 crc kubenswrapper[4783]: E1002 11:54:43.455880 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8aaa98401518048d1497ccb0d484d494e9d2a611870b9513756e005577c63fd3\": container with ID starting with 8aaa98401518048d1497ccb0d484d494e9d2a611870b9513756e005577c63fd3 not found: ID does not exist" containerID="8aaa98401518048d1497ccb0d484d494e9d2a611870b9513756e005577c63fd3" Oct 02 11:54:43 crc kubenswrapper[4783]: I1002 11:54:43.455923 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8aaa98401518048d1497ccb0d484d494e9d2a611870b9513756e005577c63fd3"} err="failed to get container status \"8aaa98401518048d1497ccb0d484d494e9d2a611870b9513756e005577c63fd3\": rpc error: code = NotFound desc = could not find container \"8aaa98401518048d1497ccb0d484d494e9d2a611870b9513756e005577c63fd3\": container with ID starting with 8aaa98401518048d1497ccb0d484d494e9d2a611870b9513756e005577c63fd3 not found: ID does not exist" Oct 02 11:54:43 crc kubenswrapper[4783]: I1002 11:54:43.455956 4783 scope.go:117] "RemoveContainer" containerID="d6dbf1dccd5e079f24f81019dd5bf7a6dce3c65948cf0dbe90d754a168126580" Oct 02 11:54:43 crc kubenswrapper[4783]: E1002 11:54:43.456300 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d6dbf1dccd5e079f24f81019dd5bf7a6dce3c65948cf0dbe90d754a168126580\": container with ID starting with d6dbf1dccd5e079f24f81019dd5bf7a6dce3c65948cf0dbe90d754a168126580 not found: ID does not exist" containerID="d6dbf1dccd5e079f24f81019dd5bf7a6dce3c65948cf0dbe90d754a168126580" Oct 02 11:54:43 crc kubenswrapper[4783]: I1002 11:54:43.456331 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d6dbf1dccd5e079f24f81019dd5bf7a6dce3c65948cf0dbe90d754a168126580"} err="failed to get container status \"d6dbf1dccd5e079f24f81019dd5bf7a6dce3c65948cf0dbe90d754a168126580\": rpc error: code = NotFound desc = could not find container \"d6dbf1dccd5e079f24f81019dd5bf7a6dce3c65948cf0dbe90d754a168126580\": container with ID starting with d6dbf1dccd5e079f24f81019dd5bf7a6dce3c65948cf0dbe90d754a168126580 not found: ID does not exist" Oct 02 11:54:43 crc kubenswrapper[4783]: I1002 11:54:43.456352 4783 scope.go:117] "RemoveContainer" containerID="8789ff52f3a639423bc424282ae8389c68ed60b03f001bff080d567823227ada" Oct 02 11:54:43 crc kubenswrapper[4783]: E1002 11:54:43.456901 4783 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"8789ff52f3a639423bc424282ae8389c68ed60b03f001bff080d567823227ada\": container with ID starting with 8789ff52f3a639423bc424282ae8389c68ed60b03f001bff080d567823227ada not found: ID does not exist" containerID="8789ff52f3a639423bc424282ae8389c68ed60b03f001bff080d567823227ada" Oct 02 11:54:43 crc kubenswrapper[4783]: I1002 11:54:43.456924 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8789ff52f3a639423bc424282ae8389c68ed60b03f001bff080d567823227ada"} err="failed to get container status \"8789ff52f3a639423bc424282ae8389c68ed60b03f001bff080d567823227ada\": rpc error: code = NotFound desc = could not find container \"8789ff52f3a639423bc424282ae8389c68ed60b03f001bff080d567823227ada\": container with ID starting with 8789ff52f3a639423bc424282ae8389c68ed60b03f001bff080d567823227ada not found: ID does not exist" Oct 02 11:54:43 crc kubenswrapper[4783]: I1002 11:54:43.557522 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fdf3e91b-4f36-4a7c-a900-ba7beb9352f9" path="/var/lib/kubelet/pods/fdf3e91b-4f36-4a7c-a900-ba7beb9352f9/volumes" Oct 02 11:54:44 crc kubenswrapper[4783]: I1002 11:54:44.546368 4783 scope.go:117] "RemoveContainer" containerID="7b64efa2db7d6a24365c13a8d7c58ab48d29750790f46655c264b5faf59edaa3" Oct 02 11:54:44 crc kubenswrapper[4783]: E1002 11:54:44.547386 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:54:58 crc kubenswrapper[4783]: I1002 11:54:58.545598 4783 scope.go:117] "RemoveContainer" containerID="7b64efa2db7d6a24365c13a8d7c58ab48d29750790f46655c264b5faf59edaa3" Oct 02 11:54:58 crc kubenswrapper[4783]: E1002 11:54:58.546378 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:55:13 crc kubenswrapper[4783]: I1002 11:55:13.545052 4783 scope.go:117] "RemoveContainer" containerID="7b64efa2db7d6a24365c13a8d7c58ab48d29750790f46655c264b5faf59edaa3" Oct 02 11:55:13 crc kubenswrapper[4783]: E1002 11:55:13.545750 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:55:25 crc kubenswrapper[4783]: I1002 11:55:25.545524 4783 scope.go:117] "RemoveContainer" containerID="7b64efa2db7d6a24365c13a8d7c58ab48d29750790f46655c264b5faf59edaa3" Oct 02 11:55:25 crc kubenswrapper[4783]: E1002 11:55:25.546196 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:55:36 crc kubenswrapper[4783]: I1002 11:55:36.545403 4783 scope.go:117] "RemoveContainer" containerID="7b64efa2db7d6a24365c13a8d7c58ab48d29750790f46655c264b5faf59edaa3" Oct 02 11:55:36 crc kubenswrapper[4783]: E1002 11:55:36.546208 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:55:51 crc kubenswrapper[4783]: I1002 11:55:51.546493 4783 scope.go:117] "RemoveContainer" containerID="7b64efa2db7d6a24365c13a8d7c58ab48d29750790f46655c264b5faf59edaa3" Oct 02 11:55:51 crc kubenswrapper[4783]: E1002 11:55:51.547781 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:56:04 crc kubenswrapper[4783]: I1002 11:56:04.546199 4783 scope.go:117] "RemoveContainer" containerID="7b64efa2db7d6a24365c13a8d7c58ab48d29750790f46655c264b5faf59edaa3" Oct 02 11:56:04 crc kubenswrapper[4783]: E1002 11:56:04.547096 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:56:15 crc kubenswrapper[4783]: I1002 11:56:15.545144 4783 scope.go:117] "RemoveContainer" containerID="7b64efa2db7d6a24365c13a8d7c58ab48d29750790f46655c264b5faf59edaa3" Oct 02 11:56:15 crc kubenswrapper[4783]: E1002 11:56:15.545858 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:56:26 crc kubenswrapper[4783]: I1002 11:56:26.544877 4783 scope.go:117] "RemoveContainer" containerID="7b64efa2db7d6a24365c13a8d7c58ab48d29750790f46655c264b5faf59edaa3" Oct 02 11:56:26 crc kubenswrapper[4783]: E1002 11:56:26.545761 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:56:37 crc kubenswrapper[4783]: I1002 11:56:37.552937 4783 scope.go:117] "RemoveContainer" containerID="7b64efa2db7d6a24365c13a8d7c58ab48d29750790f46655c264b5faf59edaa3" Oct 02 11:56:37 crc kubenswrapper[4783]: E1002 11:56:37.555191 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:56:48 crc kubenswrapper[4783]: I1002 11:56:48.545667 4783 scope.go:117] "RemoveContainer" containerID="7b64efa2db7d6a24365c13a8d7c58ab48d29750790f46655c264b5faf59edaa3" Oct 02 11:56:48 crc kubenswrapper[4783]: E1002 11:56:48.546464 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:56:59 crc kubenswrapper[4783]: I1002 11:56:59.545359 4783 scope.go:117] "RemoveContainer" containerID="7b64efa2db7d6a24365c13a8d7c58ab48d29750790f46655c264b5faf59edaa3" Oct 02 11:56:59 crc kubenswrapper[4783]: E1002 11:56:59.546413 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:57:11 crc kubenswrapper[4783]: I1002 11:57:11.545555 4783 scope.go:117] "RemoveContainer" containerID="7b64efa2db7d6a24365c13a8d7c58ab48d29750790f46655c264b5faf59edaa3" Oct 02 11:57:11 crc kubenswrapper[4783]: E1002 11:57:11.546231 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:57:23 crc kubenswrapper[4783]: I1002 11:57:23.544848 4783 scope.go:117] "RemoveContainer" containerID="7b64efa2db7d6a24365c13a8d7c58ab48d29750790f46655c264b5faf59edaa3" Oct 02 11:57:23 crc kubenswrapper[4783]: E1002 11:57:23.545738 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" 
podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:57:35 crc kubenswrapper[4783]: I1002 11:57:35.545120 4783 scope.go:117] "RemoveContainer" containerID="7b64efa2db7d6a24365c13a8d7c58ab48d29750790f46655c264b5faf59edaa3" Oct 02 11:57:35 crc kubenswrapper[4783]: E1002 11:57:35.545909 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:57:48 crc kubenswrapper[4783]: I1002 11:57:48.544470 4783 scope.go:117] "RemoveContainer" containerID="7b64efa2db7d6a24365c13a8d7c58ab48d29750790f46655c264b5faf59edaa3" Oct 02 11:57:48 crc kubenswrapper[4783]: E1002 11:57:48.545013 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:58:00 crc kubenswrapper[4783]: I1002 11:58:00.545452 4783 scope.go:117] "RemoveContainer" containerID="7b64efa2db7d6a24365c13a8d7c58ab48d29750790f46655c264b5faf59edaa3" Oct 02 11:58:00 crc kubenswrapper[4783]: E1002 11:58:00.546157 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:58:15 crc kubenswrapper[4783]: I1002 11:58:15.545107 4783 scope.go:117] "RemoveContainer" containerID="7b64efa2db7d6a24365c13a8d7c58ab48d29750790f46655c264b5faf59edaa3" Oct 02 11:58:15 crc kubenswrapper[4783]: E1002 11:58:15.545872 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 11:58:28 crc kubenswrapper[4783]: I1002 11:58:28.545574 4783 scope.go:117] "RemoveContainer" containerID="7b64efa2db7d6a24365c13a8d7c58ab48d29750790f46655c264b5faf59edaa3" Oct 02 11:58:29 crc kubenswrapper[4783]: I1002 11:58:29.408653 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" event={"ID":"3288cc82-59a8-408e-8b0e-b5255882b4fb","Type":"ContainerStarted","Data":"aafa172fb7173020190d3f951ea5c261639271a72bfdacce3f5cf23237ef1dd6"} Oct 02 11:59:06 crc kubenswrapper[4783]: I1002 11:59:06.034116 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-fsw2z"] Oct 02 11:59:06 crc kubenswrapper[4783]: E1002 11:59:06.035029 4783 cpu_manager.go:410] "RemoveStaleState: removing 
container" podUID="fdf3e91b-4f36-4a7c-a900-ba7beb9352f9" containerName="extract-content" Oct 02 11:59:06 crc kubenswrapper[4783]: I1002 11:59:06.035046 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="fdf3e91b-4f36-4a7c-a900-ba7beb9352f9" containerName="extract-content" Oct 02 11:59:06 crc kubenswrapper[4783]: E1002 11:59:06.035067 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fdf3e91b-4f36-4a7c-a900-ba7beb9352f9" containerName="registry-server" Oct 02 11:59:06 crc kubenswrapper[4783]: I1002 11:59:06.035078 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="fdf3e91b-4f36-4a7c-a900-ba7beb9352f9" containerName="registry-server" Oct 02 11:59:06 crc kubenswrapper[4783]: E1002 11:59:06.035091 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fdf3e91b-4f36-4a7c-a900-ba7beb9352f9" containerName="extract-utilities" Oct 02 11:59:06 crc kubenswrapper[4783]: I1002 11:59:06.035101 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="fdf3e91b-4f36-4a7c-a900-ba7beb9352f9" containerName="extract-utilities" Oct 02 11:59:06 crc kubenswrapper[4783]: I1002 11:59:06.035283 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="fdf3e91b-4f36-4a7c-a900-ba7beb9352f9" containerName="registry-server" Oct 02 11:59:06 crc kubenswrapper[4783]: I1002 11:59:06.035945 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fsw2z" Oct 02 11:59:06 crc kubenswrapper[4783]: I1002 11:59:06.039115 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-aee-default-env" Oct 02 11:59:06 crc kubenswrapper[4783]: I1002 11:59:06.039654 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-compute-config" Oct 02 11:59:06 crc kubenswrapper[4783]: I1002 11:59:06.040046 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"nova-extra-config" Oct 02 11:59:06 crc kubenswrapper[4783]: I1002 11:59:06.040182 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-migration-ssh-key" Oct 02 11:59:06 crc kubenswrapper[4783]: I1002 11:59:06.040301 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplanenodeset-openstack-edpm-ipam" Oct 02 11:59:06 crc kubenswrapper[4783]: I1002 11:59:06.040737 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dataplane-ansible-ssh-private-key-secret" Oct 02 11:59:06 crc kubenswrapper[4783]: I1002 11:59:06.040896 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-edpm-ipam-dockercfg-n5lmz" Oct 02 11:59:06 crc kubenswrapper[4783]: I1002 11:59:06.049374 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-fsw2z"] Oct 02 11:59:06 crc kubenswrapper[4783]: I1002 11:59:06.153352 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ac474127-0c18-4b02-bffb-9c141a545df2-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fsw2z\" (UID: \"ac474127-0c18-4b02-bffb-9c141a545df2\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fsw2z" Oct 02 11:59:06 crc kubenswrapper[4783]: I1002 11:59:06.153519 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: 
\"kubernetes.io/secret/ac474127-0c18-4b02-bffb-9c141a545df2-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fsw2z\" (UID: \"ac474127-0c18-4b02-bffb-9c141a545df2\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fsw2z" Oct 02 11:59:06 crc kubenswrapper[4783]: I1002 11:59:06.153684 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/ac474127-0c18-4b02-bffb-9c141a545df2-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fsw2z\" (UID: \"ac474127-0c18-4b02-bffb-9c141a545df2\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fsw2z" Oct 02 11:59:06 crc kubenswrapper[4783]: I1002 11:59:06.153994 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/ac474127-0c18-4b02-bffb-9c141a545df2-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fsw2z\" (UID: \"ac474127-0c18-4b02-bffb-9c141a545df2\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fsw2z" Oct 02 11:59:06 crc kubenswrapper[4783]: I1002 11:59:06.154137 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ac474127-0c18-4b02-bffb-9c141a545df2-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fsw2z\" (UID: \"ac474127-0c18-4b02-bffb-9c141a545df2\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fsw2z" Oct 02 11:59:06 crc kubenswrapper[4783]: I1002 11:59:06.154209 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/ac474127-0c18-4b02-bffb-9c141a545df2-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fsw2z\" (UID: \"ac474127-0c18-4b02-bffb-9c141a545df2\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fsw2z" Oct 02 11:59:06 crc kubenswrapper[4783]: I1002 11:59:06.154255 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/ac474127-0c18-4b02-bffb-9c141a545df2-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fsw2z\" (UID: \"ac474127-0c18-4b02-bffb-9c141a545df2\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fsw2z" Oct 02 11:59:06 crc kubenswrapper[4783]: I1002 11:59:06.154340 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9tz8c\" (UniqueName: \"kubernetes.io/projected/ac474127-0c18-4b02-bffb-9c141a545df2-kube-api-access-9tz8c\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fsw2z\" (UID: \"ac474127-0c18-4b02-bffb-9c141a545df2\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fsw2z" Oct 02 11:59:06 crc kubenswrapper[4783]: I1002 11:59:06.154455 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac474127-0c18-4b02-bffb-9c141a545df2-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fsw2z\" (UID: \"ac474127-0c18-4b02-bffb-9c141a545df2\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fsw2z" Oct 02 11:59:06 crc kubenswrapper[4783]: I1002 11:59:06.256431 4783 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/ac474127-0c18-4b02-bffb-9c141a545df2-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fsw2z\" (UID: \"ac474127-0c18-4b02-bffb-9c141a545df2\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fsw2z" Oct 02 11:59:06 crc kubenswrapper[4783]: I1002 11:59:06.256728 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ac474127-0c18-4b02-bffb-9c141a545df2-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fsw2z\" (UID: \"ac474127-0c18-4b02-bffb-9c141a545df2\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fsw2z" Oct 02 11:59:06 crc kubenswrapper[4783]: I1002 11:59:06.256832 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/ac474127-0c18-4b02-bffb-9c141a545df2-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fsw2z\" (UID: \"ac474127-0c18-4b02-bffb-9c141a545df2\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fsw2z" Oct 02 11:59:06 crc kubenswrapper[4783]: I1002 11:59:06.256960 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/ac474127-0c18-4b02-bffb-9c141a545df2-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fsw2z\" (UID: \"ac474127-0c18-4b02-bffb-9c141a545df2\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fsw2z" Oct 02 11:59:06 crc kubenswrapper[4783]: I1002 11:59:06.257565 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9tz8c\" (UniqueName: \"kubernetes.io/projected/ac474127-0c18-4b02-bffb-9c141a545df2-kube-api-access-9tz8c\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fsw2z\" (UID: \"ac474127-0c18-4b02-bffb-9c141a545df2\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fsw2z" Oct 02 11:59:06 crc kubenswrapper[4783]: I1002 11:59:06.257648 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/ac474127-0c18-4b02-bffb-9c141a545df2-nova-extra-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fsw2z\" (UID: \"ac474127-0c18-4b02-bffb-9c141a545df2\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fsw2z" Oct 02 11:59:06 crc kubenswrapper[4783]: I1002 11:59:06.257775 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac474127-0c18-4b02-bffb-9c141a545df2-nova-combined-ca-bundle\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fsw2z\" (UID: \"ac474127-0c18-4b02-bffb-9c141a545df2\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fsw2z" Oct 02 11:59:06 crc kubenswrapper[4783]: I1002 11:59:06.257907 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ac474127-0c18-4b02-bffb-9c141a545df2-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fsw2z\" (UID: \"ac474127-0c18-4b02-bffb-9c141a545df2\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fsw2z" Oct 02 11:59:06 crc kubenswrapper[4783]: I1002 11:59:06.257985 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: 
\"kubernetes.io/secret/ac474127-0c18-4b02-bffb-9c141a545df2-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fsw2z\" (UID: \"ac474127-0c18-4b02-bffb-9c141a545df2\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fsw2z" Oct 02 11:59:06 crc kubenswrapper[4783]: I1002 11:59:06.258079 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/ac474127-0c18-4b02-bffb-9c141a545df2-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fsw2z\" (UID: \"ac474127-0c18-4b02-bffb-9c141a545df2\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fsw2z" Oct 02 11:59:06 crc kubenswrapper[4783]: I1002 11:59:06.326272 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/ac474127-0c18-4b02-bffb-9c141a545df2-nova-cell1-compute-config-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fsw2z\" (UID: \"ac474127-0c18-4b02-bffb-9c141a545df2\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fsw2z" Oct 02 11:59:06 crc kubenswrapper[4783]: I1002 11:59:06.326301 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ac474127-0c18-4b02-bffb-9c141a545df2-inventory\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fsw2z\" (UID: \"ac474127-0c18-4b02-bffb-9c141a545df2\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fsw2z" Oct 02 11:59:06 crc kubenswrapper[4783]: I1002 11:59:06.327401 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ac474127-0c18-4b02-bffb-9c141a545df2-ssh-key\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fsw2z\" (UID: \"ac474127-0c18-4b02-bffb-9c141a545df2\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fsw2z" Oct 02 11:59:06 crc kubenswrapper[4783]: I1002 11:59:06.327804 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/ac474127-0c18-4b02-bffb-9c141a545df2-nova-cell1-compute-config-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fsw2z\" (UID: \"ac474127-0c18-4b02-bffb-9c141a545df2\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fsw2z" Oct 02 11:59:06 crc kubenswrapper[4783]: I1002 11:59:06.327817 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/ac474127-0c18-4b02-bffb-9c141a545df2-nova-migration-ssh-key-0\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fsw2z\" (UID: \"ac474127-0c18-4b02-bffb-9c141a545df2\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fsw2z" Oct 02 11:59:06 crc kubenswrapper[4783]: I1002 11:59:06.327493 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/ac474127-0c18-4b02-bffb-9c141a545df2-nova-migration-ssh-key-1\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fsw2z\" (UID: \"ac474127-0c18-4b02-bffb-9c141a545df2\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fsw2z" Oct 02 11:59:06 crc kubenswrapper[4783]: I1002 11:59:06.328148 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac474127-0c18-4b02-bffb-9c141a545df2-nova-combined-ca-bundle\") pod 
\"nova-edpm-deployment-openstack-edpm-ipam-fsw2z\" (UID: \"ac474127-0c18-4b02-bffb-9c141a545df2\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fsw2z" Oct 02 11:59:06 crc kubenswrapper[4783]: I1002 11:59:06.328424 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9tz8c\" (UniqueName: \"kubernetes.io/projected/ac474127-0c18-4b02-bffb-9c141a545df2-kube-api-access-9tz8c\") pod \"nova-edpm-deployment-openstack-edpm-ipam-fsw2z\" (UID: \"ac474127-0c18-4b02-bffb-9c141a545df2\") " pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fsw2z" Oct 02 11:59:06 crc kubenswrapper[4783]: I1002 11:59:06.355245 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fsw2z" Oct 02 11:59:06 crc kubenswrapper[4783]: I1002 11:59:06.854695 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-edpm-deployment-openstack-edpm-ipam-fsw2z"] Oct 02 11:59:06 crc kubenswrapper[4783]: I1002 11:59:06.858962 4783 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 02 11:59:07 crc kubenswrapper[4783]: I1002 11:59:07.781003 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fsw2z" event={"ID":"ac474127-0c18-4b02-bffb-9c141a545df2","Type":"ContainerStarted","Data":"9a2ace11171c5c071e91ea56d301e36e27c9b0c54627111c0dc80d410c2107ea"} Oct 02 11:59:07 crc kubenswrapper[4783]: I1002 11:59:07.782183 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fsw2z" event={"ID":"ac474127-0c18-4b02-bffb-9c141a545df2","Type":"ContainerStarted","Data":"02c4cd8374bb90d779890b3fb64483ba5fbaff8969cd8712ce96896ae61d54a8"} Oct 02 11:59:07 crc kubenswrapper[4783]: I1002 11:59:07.801311 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fsw2z" podStartSLOduration=1.604558031 podStartE2EDuration="1.801296627s" podCreationTimestamp="2025-10-02 11:59:06 +0000 UTC" firstStartedPulling="2025-10-02 11:59:06.858730734 +0000 UTC m=+3980.174924995" lastFinishedPulling="2025-10-02 11:59:07.05546933 +0000 UTC m=+3980.371663591" observedRunningTime="2025-10-02 11:59:07.798508182 +0000 UTC m=+3981.114702453" watchObservedRunningTime="2025-10-02 11:59:07.801296627 +0000 UTC m=+3981.117490888" Oct 02 12:00:00 crc kubenswrapper[4783]: I1002 12:00:00.145051 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29323440-tknhx"] Oct 02 12:00:00 crc kubenswrapper[4783]: I1002 12:00:00.147627 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29323440-tknhx" Oct 02 12:00:00 crc kubenswrapper[4783]: I1002 12:00:00.151196 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Oct 02 12:00:00 crc kubenswrapper[4783]: I1002 12:00:00.151627 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Oct 02 12:00:00 crc kubenswrapper[4783]: I1002 12:00:00.157243 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29323440-tknhx"] Oct 02 12:00:00 crc kubenswrapper[4783]: I1002 12:00:00.314712 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-898g7\" (UniqueName: \"kubernetes.io/projected/e3c376da-acf0-43f1-a2de-54be08fcedbf-kube-api-access-898g7\") pod \"collect-profiles-29323440-tknhx\" (UID: \"e3c376da-acf0-43f1-a2de-54be08fcedbf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323440-tknhx" Oct 02 12:00:00 crc kubenswrapper[4783]: I1002 12:00:00.315079 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e3c376da-acf0-43f1-a2de-54be08fcedbf-secret-volume\") pod \"collect-profiles-29323440-tknhx\" (UID: \"e3c376da-acf0-43f1-a2de-54be08fcedbf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323440-tknhx" Oct 02 12:00:00 crc kubenswrapper[4783]: I1002 12:00:00.315189 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e3c376da-acf0-43f1-a2de-54be08fcedbf-config-volume\") pod \"collect-profiles-29323440-tknhx\" (UID: \"e3c376da-acf0-43f1-a2de-54be08fcedbf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323440-tknhx" Oct 02 12:00:00 crc kubenswrapper[4783]: I1002 12:00:00.416978 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-898g7\" (UniqueName: \"kubernetes.io/projected/e3c376da-acf0-43f1-a2de-54be08fcedbf-kube-api-access-898g7\") pod \"collect-profiles-29323440-tknhx\" (UID: \"e3c376da-acf0-43f1-a2de-54be08fcedbf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323440-tknhx" Oct 02 12:00:00 crc kubenswrapper[4783]: I1002 12:00:00.417439 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e3c376da-acf0-43f1-a2de-54be08fcedbf-secret-volume\") pod \"collect-profiles-29323440-tknhx\" (UID: \"e3c376da-acf0-43f1-a2de-54be08fcedbf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323440-tknhx" Oct 02 12:00:00 crc kubenswrapper[4783]: I1002 12:00:00.417495 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e3c376da-acf0-43f1-a2de-54be08fcedbf-config-volume\") pod \"collect-profiles-29323440-tknhx\" (UID: \"e3c376da-acf0-43f1-a2de-54be08fcedbf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323440-tknhx" Oct 02 12:00:00 crc kubenswrapper[4783]: I1002 12:00:00.418563 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e3c376da-acf0-43f1-a2de-54be08fcedbf-config-volume\") pod 
\"collect-profiles-29323440-tknhx\" (UID: \"e3c376da-acf0-43f1-a2de-54be08fcedbf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323440-tknhx" Oct 02 12:00:00 crc kubenswrapper[4783]: I1002 12:00:00.426728 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e3c376da-acf0-43f1-a2de-54be08fcedbf-secret-volume\") pod \"collect-profiles-29323440-tknhx\" (UID: \"e3c376da-acf0-43f1-a2de-54be08fcedbf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323440-tknhx" Oct 02 12:00:00 crc kubenswrapper[4783]: I1002 12:00:00.441018 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-898g7\" (UniqueName: \"kubernetes.io/projected/e3c376da-acf0-43f1-a2de-54be08fcedbf-kube-api-access-898g7\") pod \"collect-profiles-29323440-tknhx\" (UID: \"e3c376da-acf0-43f1-a2de-54be08fcedbf\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323440-tknhx" Oct 02 12:00:00 crc kubenswrapper[4783]: I1002 12:00:00.482267 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29323440-tknhx" Oct 02 12:00:00 crc kubenswrapper[4783]: I1002 12:00:00.957209 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29323440-tknhx"] Oct 02 12:00:01 crc kubenswrapper[4783]: I1002 12:00:01.268346 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29323440-tknhx" event={"ID":"e3c376da-acf0-43f1-a2de-54be08fcedbf","Type":"ContainerStarted","Data":"a0e9131a8c7812ae52acce7689a371b47f2c28948e5582324b1dc2846cc3c71c"} Oct 02 12:00:01 crc kubenswrapper[4783]: I1002 12:00:01.268771 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29323440-tknhx" event={"ID":"e3c376da-acf0-43f1-a2de-54be08fcedbf","Type":"ContainerStarted","Data":"1b950af5440cb2e41c67cf18c9bd15cf75fe695502a5bacdebef5a1fada280e8"} Oct 02 12:00:01 crc kubenswrapper[4783]: I1002 12:00:01.288514 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29323440-tknhx" podStartSLOduration=1.288491614 podStartE2EDuration="1.288491614s" podCreationTimestamp="2025-10-02 12:00:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 12:00:01.284691901 +0000 UTC m=+4034.600886162" watchObservedRunningTime="2025-10-02 12:00:01.288491614 +0000 UTC m=+4034.604685875" Oct 02 12:00:02 crc kubenswrapper[4783]: I1002 12:00:02.289047 4783 generic.go:334] "Generic (PLEG): container finished" podID="e3c376da-acf0-43f1-a2de-54be08fcedbf" containerID="a0e9131a8c7812ae52acce7689a371b47f2c28948e5582324b1dc2846cc3c71c" exitCode=0 Oct 02 12:00:02 crc kubenswrapper[4783]: I1002 12:00:02.289132 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29323440-tknhx" event={"ID":"e3c376da-acf0-43f1-a2de-54be08fcedbf","Type":"ContainerDied","Data":"a0e9131a8c7812ae52acce7689a371b47f2c28948e5582324b1dc2846cc3c71c"} Oct 02 12:00:03 crc kubenswrapper[4783]: I1002 12:00:03.627699 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29323440-tknhx" Oct 02 12:00:03 crc kubenswrapper[4783]: I1002 12:00:03.779528 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-898g7\" (UniqueName: \"kubernetes.io/projected/e3c376da-acf0-43f1-a2de-54be08fcedbf-kube-api-access-898g7\") pod \"e3c376da-acf0-43f1-a2de-54be08fcedbf\" (UID: \"e3c376da-acf0-43f1-a2de-54be08fcedbf\") " Oct 02 12:00:03 crc kubenswrapper[4783]: I1002 12:00:03.779747 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e3c376da-acf0-43f1-a2de-54be08fcedbf-secret-volume\") pod \"e3c376da-acf0-43f1-a2de-54be08fcedbf\" (UID: \"e3c376da-acf0-43f1-a2de-54be08fcedbf\") " Oct 02 12:00:03 crc kubenswrapper[4783]: I1002 12:00:03.779940 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e3c376da-acf0-43f1-a2de-54be08fcedbf-config-volume\") pod \"e3c376da-acf0-43f1-a2de-54be08fcedbf\" (UID: \"e3c376da-acf0-43f1-a2de-54be08fcedbf\") " Oct 02 12:00:03 crc kubenswrapper[4783]: I1002 12:00:03.780648 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e3c376da-acf0-43f1-a2de-54be08fcedbf-config-volume" (OuterVolumeSpecName: "config-volume") pod "e3c376da-acf0-43f1-a2de-54be08fcedbf" (UID: "e3c376da-acf0-43f1-a2de-54be08fcedbf"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 12:00:03 crc kubenswrapper[4783]: I1002 12:00:03.781554 4783 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/e3c376da-acf0-43f1-a2de-54be08fcedbf-config-volume\") on node \"crc\" DevicePath \"\"" Oct 02 12:00:03 crc kubenswrapper[4783]: I1002 12:00:03.786718 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e3c376da-acf0-43f1-a2de-54be08fcedbf-kube-api-access-898g7" (OuterVolumeSpecName: "kube-api-access-898g7") pod "e3c376da-acf0-43f1-a2de-54be08fcedbf" (UID: "e3c376da-acf0-43f1-a2de-54be08fcedbf"). InnerVolumeSpecName "kube-api-access-898g7". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 12:00:03 crc kubenswrapper[4783]: I1002 12:00:03.786807 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e3c376da-acf0-43f1-a2de-54be08fcedbf-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "e3c376da-acf0-43f1-a2de-54be08fcedbf" (UID: "e3c376da-acf0-43f1-a2de-54be08fcedbf"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 12:00:03 crc kubenswrapper[4783]: I1002 12:00:03.822890 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-vxzsb"] Oct 02 12:00:03 crc kubenswrapper[4783]: E1002 12:00:03.823457 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3c376da-acf0-43f1-a2de-54be08fcedbf" containerName="collect-profiles" Oct 02 12:00:03 crc kubenswrapper[4783]: I1002 12:00:03.823482 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3c376da-acf0-43f1-a2de-54be08fcedbf" containerName="collect-profiles" Oct 02 12:00:03 crc kubenswrapper[4783]: I1002 12:00:03.823730 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="e3c376da-acf0-43f1-a2de-54be08fcedbf" containerName="collect-profiles" Oct 02 12:00:03 crc kubenswrapper[4783]: I1002 12:00:03.825581 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vxzsb" Oct 02 12:00:03 crc kubenswrapper[4783]: I1002 12:00:03.862196 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vxzsb"] Oct 02 12:00:03 crc kubenswrapper[4783]: I1002 12:00:03.882964 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-898g7\" (UniqueName: \"kubernetes.io/projected/e3c376da-acf0-43f1-a2de-54be08fcedbf-kube-api-access-898g7\") on node \"crc\" DevicePath \"\"" Oct 02 12:00:03 crc kubenswrapper[4783]: I1002 12:00:03.883002 4783 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/e3c376da-acf0-43f1-a2de-54be08fcedbf-secret-volume\") on node \"crc\" DevicePath \"\"" Oct 02 12:00:03 crc kubenswrapper[4783]: I1002 12:00:03.985231 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9k9g6\" (UniqueName: \"kubernetes.io/projected/fc83538f-5164-4f06-9db5-53980f5884db-kube-api-access-9k9g6\") pod \"redhat-operators-vxzsb\" (UID: \"fc83538f-5164-4f06-9db5-53980f5884db\") " pod="openshift-marketplace/redhat-operators-vxzsb" Oct 02 12:00:03 crc kubenswrapper[4783]: I1002 12:00:03.985388 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fc83538f-5164-4f06-9db5-53980f5884db-catalog-content\") pod \"redhat-operators-vxzsb\" (UID: \"fc83538f-5164-4f06-9db5-53980f5884db\") " pod="openshift-marketplace/redhat-operators-vxzsb" Oct 02 12:00:03 crc kubenswrapper[4783]: I1002 12:00:03.985610 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fc83538f-5164-4f06-9db5-53980f5884db-utilities\") pod \"redhat-operators-vxzsb\" (UID: \"fc83538f-5164-4f06-9db5-53980f5884db\") " pod="openshift-marketplace/redhat-operators-vxzsb" Oct 02 12:00:04 crc kubenswrapper[4783]: I1002 12:00:04.088040 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9k9g6\" (UniqueName: \"kubernetes.io/projected/fc83538f-5164-4f06-9db5-53980f5884db-kube-api-access-9k9g6\") pod \"redhat-operators-vxzsb\" (UID: \"fc83538f-5164-4f06-9db5-53980f5884db\") " pod="openshift-marketplace/redhat-operators-vxzsb" Oct 02 12:00:04 crc kubenswrapper[4783]: I1002 12:00:04.088125 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/fc83538f-5164-4f06-9db5-53980f5884db-catalog-content\") pod \"redhat-operators-vxzsb\" (UID: \"fc83538f-5164-4f06-9db5-53980f5884db\") " pod="openshift-marketplace/redhat-operators-vxzsb" Oct 02 12:00:04 crc kubenswrapper[4783]: I1002 12:00:04.088191 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fc83538f-5164-4f06-9db5-53980f5884db-utilities\") pod \"redhat-operators-vxzsb\" (UID: \"fc83538f-5164-4f06-9db5-53980f5884db\") " pod="openshift-marketplace/redhat-operators-vxzsb" Oct 02 12:00:04 crc kubenswrapper[4783]: I1002 12:00:04.088810 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fc83538f-5164-4f06-9db5-53980f5884db-utilities\") pod \"redhat-operators-vxzsb\" (UID: \"fc83538f-5164-4f06-9db5-53980f5884db\") " pod="openshift-marketplace/redhat-operators-vxzsb" Oct 02 12:00:04 crc kubenswrapper[4783]: I1002 12:00:04.089378 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fc83538f-5164-4f06-9db5-53980f5884db-catalog-content\") pod \"redhat-operators-vxzsb\" (UID: \"fc83538f-5164-4f06-9db5-53980f5884db\") " pod="openshift-marketplace/redhat-operators-vxzsb" Oct 02 12:00:04 crc kubenswrapper[4783]: I1002 12:00:04.109601 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9k9g6\" (UniqueName: \"kubernetes.io/projected/fc83538f-5164-4f06-9db5-53980f5884db-kube-api-access-9k9g6\") pod \"redhat-operators-vxzsb\" (UID: \"fc83538f-5164-4f06-9db5-53980f5884db\") " pod="openshift-marketplace/redhat-operators-vxzsb" Oct 02 12:00:04 crc kubenswrapper[4783]: I1002 12:00:04.218743 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vxzsb" Oct 02 12:00:04 crc kubenswrapper[4783]: I1002 12:00:04.316624 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29323440-tknhx" event={"ID":"e3c376da-acf0-43f1-a2de-54be08fcedbf","Type":"ContainerDied","Data":"1b950af5440cb2e41c67cf18c9bd15cf75fe695502a5bacdebef5a1fada280e8"} Oct 02 12:00:04 crc kubenswrapper[4783]: I1002 12:00:04.316995 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1b950af5440cb2e41c67cf18c9bd15cf75fe695502a5bacdebef5a1fada280e8" Oct 02 12:00:04 crc kubenswrapper[4783]: I1002 12:00:04.317062 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29323440-tknhx" Oct 02 12:00:04 crc kubenswrapper[4783]: I1002 12:00:04.389572 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29323395-9gvqg"] Oct 02 12:00:04 crc kubenswrapper[4783]: I1002 12:00:04.397972 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29323395-9gvqg"] Oct 02 12:00:04 crc kubenswrapper[4783]: I1002 12:00:04.714519 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-vxzsb"] Oct 02 12:00:05 crc kubenswrapper[4783]: I1002 12:00:05.326037 4783 generic.go:334] "Generic (PLEG): container finished" podID="fc83538f-5164-4f06-9db5-53980f5884db" containerID="2146f14eb6a6abfb50a7316a98ca826f4b670b15c0fb5bf88918d12efc4a5fd2" exitCode=0 Oct 02 12:00:05 crc kubenswrapper[4783]: I1002 12:00:05.326086 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vxzsb" event={"ID":"fc83538f-5164-4f06-9db5-53980f5884db","Type":"ContainerDied","Data":"2146f14eb6a6abfb50a7316a98ca826f4b670b15c0fb5bf88918d12efc4a5fd2"} Oct 02 12:00:05 crc kubenswrapper[4783]: I1002 12:00:05.326146 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vxzsb" event={"ID":"fc83538f-5164-4f06-9db5-53980f5884db","Type":"ContainerStarted","Data":"66f8ddc4ced5973d9ff05b467881b3cd39dc4cbde03662657251814e319b15e9"} Oct 02 12:00:05 crc kubenswrapper[4783]: I1002 12:00:05.559483 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0af4bab9-5198-4d2c-a811-17db77304d40" path="/var/lib/kubelet/pods/0af4bab9-5198-4d2c-a811-17db77304d40/volumes" Oct 02 12:00:07 crc kubenswrapper[4783]: I1002 12:00:07.349913 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vxzsb" event={"ID":"fc83538f-5164-4f06-9db5-53980f5884db","Type":"ContainerStarted","Data":"e4636bdea8b16334099d6cbedecb7ed55121485cd4dd825b058b005bc2500519"} Oct 02 12:00:13 crc kubenswrapper[4783]: I1002 12:00:13.420938 4783 generic.go:334] "Generic (PLEG): container finished" podID="fc83538f-5164-4f06-9db5-53980f5884db" containerID="e4636bdea8b16334099d6cbedecb7ed55121485cd4dd825b058b005bc2500519" exitCode=0 Oct 02 12:00:13 crc kubenswrapper[4783]: I1002 12:00:13.421019 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vxzsb" event={"ID":"fc83538f-5164-4f06-9db5-53980f5884db","Type":"ContainerDied","Data":"e4636bdea8b16334099d6cbedecb7ed55121485cd4dd825b058b005bc2500519"} Oct 02 12:00:14 crc kubenswrapper[4783]: I1002 12:00:14.434998 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vxzsb" event={"ID":"fc83538f-5164-4f06-9db5-53980f5884db","Type":"ContainerStarted","Data":"1bf697b0244cbc940f640d08fb862811293c70104b0a74022bd97a50f8fe70b4"} Oct 02 12:00:14 crc kubenswrapper[4783]: I1002 12:00:14.459473 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-vxzsb" podStartSLOduration=2.861559441 podStartE2EDuration="11.459434213s" podCreationTimestamp="2025-10-02 12:00:03 +0000 UTC" firstStartedPulling="2025-10-02 12:00:05.328524104 +0000 UTC m=+4038.644718375" lastFinishedPulling="2025-10-02 12:00:13.926398886 +0000 UTC m=+4047.242593147" observedRunningTime="2025-10-02 12:00:14.45600843 +0000 UTC 
m=+4047.772202691" watchObservedRunningTime="2025-10-02 12:00:14.459434213 +0000 UTC m=+4047.775628494" Oct 02 12:00:24 crc kubenswrapper[4783]: I1002 12:00:24.219052 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-vxzsb" Oct 02 12:00:24 crc kubenswrapper[4783]: I1002 12:00:24.219593 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-vxzsb" Oct 02 12:00:24 crc kubenswrapper[4783]: I1002 12:00:24.265912 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-vxzsb" Oct 02 12:00:24 crc kubenswrapper[4783]: I1002 12:00:24.584779 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-vxzsb" Oct 02 12:00:24 crc kubenswrapper[4783]: I1002 12:00:24.632861 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-vxzsb"] Oct 02 12:00:26 crc kubenswrapper[4783]: I1002 12:00:26.550501 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-vxzsb" podUID="fc83538f-5164-4f06-9db5-53980f5884db" containerName="registry-server" containerID="cri-o://1bf697b0244cbc940f640d08fb862811293c70104b0a74022bd97a50f8fe70b4" gracePeriod=2 Oct 02 12:00:27 crc kubenswrapper[4783]: I1002 12:00:27.117563 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-vxzsb" Oct 02 12:00:27 crc kubenswrapper[4783]: I1002 12:00:27.226541 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fc83538f-5164-4f06-9db5-53980f5884db-utilities\") pod \"fc83538f-5164-4f06-9db5-53980f5884db\" (UID: \"fc83538f-5164-4f06-9db5-53980f5884db\") " Oct 02 12:00:27 crc kubenswrapper[4783]: I1002 12:00:27.226711 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fc83538f-5164-4f06-9db5-53980f5884db-catalog-content\") pod \"fc83538f-5164-4f06-9db5-53980f5884db\" (UID: \"fc83538f-5164-4f06-9db5-53980f5884db\") " Oct 02 12:00:27 crc kubenswrapper[4783]: I1002 12:00:27.226793 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9k9g6\" (UniqueName: \"kubernetes.io/projected/fc83538f-5164-4f06-9db5-53980f5884db-kube-api-access-9k9g6\") pod \"fc83538f-5164-4f06-9db5-53980f5884db\" (UID: \"fc83538f-5164-4f06-9db5-53980f5884db\") " Oct 02 12:00:27 crc kubenswrapper[4783]: I1002 12:00:27.228021 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fc83538f-5164-4f06-9db5-53980f5884db-utilities" (OuterVolumeSpecName: "utilities") pod "fc83538f-5164-4f06-9db5-53980f5884db" (UID: "fc83538f-5164-4f06-9db5-53980f5884db"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 12:00:27 crc kubenswrapper[4783]: I1002 12:00:27.234523 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fc83538f-5164-4f06-9db5-53980f5884db-kube-api-access-9k9g6" (OuterVolumeSpecName: "kube-api-access-9k9g6") pod "fc83538f-5164-4f06-9db5-53980f5884db" (UID: "fc83538f-5164-4f06-9db5-53980f5884db"). InnerVolumeSpecName "kube-api-access-9k9g6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 12:00:27 crc kubenswrapper[4783]: I1002 12:00:27.313934 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fc83538f-5164-4f06-9db5-53980f5884db-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fc83538f-5164-4f06-9db5-53980f5884db" (UID: "fc83538f-5164-4f06-9db5-53980f5884db"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 12:00:27 crc kubenswrapper[4783]: I1002 12:00:27.328571 4783 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fc83538f-5164-4f06-9db5-53980f5884db-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 02 12:00:27 crc kubenswrapper[4783]: I1002 12:00:27.328601 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9k9g6\" (UniqueName: \"kubernetes.io/projected/fc83538f-5164-4f06-9db5-53980f5884db-kube-api-access-9k9g6\") on node \"crc\" DevicePath \"\"" Oct 02 12:00:27 crc kubenswrapper[4783]: I1002 12:00:27.328611 4783 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fc83538f-5164-4f06-9db5-53980f5884db-utilities\") on node \"crc\" DevicePath \"\"" Oct 02 12:00:27 crc kubenswrapper[4783]: I1002 12:00:27.571687 4783 generic.go:334] "Generic (PLEG): container finished" podID="ac474127-0c18-4b02-bffb-9c141a545df2" containerID="9a2ace11171c5c071e91ea56d301e36e27c9b0c54627111c0dc80d410c2107ea" exitCode=2 Oct 02 12:00:27 crc kubenswrapper[4783]: I1002 12:00:27.572039 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fsw2z" event={"ID":"ac474127-0c18-4b02-bffb-9c141a545df2","Type":"ContainerDied","Data":"9a2ace11171c5c071e91ea56d301e36e27c9b0c54627111c0dc80d410c2107ea"} Oct 02 12:00:27 crc kubenswrapper[4783]: I1002 12:00:27.579659 4783 generic.go:334] "Generic (PLEG): container finished" podID="fc83538f-5164-4f06-9db5-53980f5884db" containerID="1bf697b0244cbc940f640d08fb862811293c70104b0a74022bd97a50f8fe70b4" exitCode=0 Oct 02 12:00:27 crc kubenswrapper[4783]: I1002 12:00:27.579714 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vxzsb" event={"ID":"fc83538f-5164-4f06-9db5-53980f5884db","Type":"ContainerDied","Data":"1bf697b0244cbc940f640d08fb862811293c70104b0a74022bd97a50f8fe70b4"} Oct 02 12:00:27 crc kubenswrapper[4783]: I1002 12:00:27.579751 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-vxzsb" event={"ID":"fc83538f-5164-4f06-9db5-53980f5884db","Type":"ContainerDied","Data":"66f8ddc4ced5973d9ff05b467881b3cd39dc4cbde03662657251814e319b15e9"} Oct 02 12:00:27 crc kubenswrapper[4783]: I1002 12:00:27.579767 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-vxzsb" Oct 02 12:00:27 crc kubenswrapper[4783]: I1002 12:00:27.579777 4783 scope.go:117] "RemoveContainer" containerID="1bf697b0244cbc940f640d08fb862811293c70104b0a74022bd97a50f8fe70b4" Oct 02 12:00:27 crc kubenswrapper[4783]: I1002 12:00:27.623892 4783 scope.go:117] "RemoveContainer" containerID="e4636bdea8b16334099d6cbedecb7ed55121485cd4dd825b058b005bc2500519" Oct 02 12:00:27 crc kubenswrapper[4783]: I1002 12:00:27.633806 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-vxzsb"] Oct 02 12:00:27 crc kubenswrapper[4783]: I1002 12:00:27.647346 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-vxzsb"] Oct 02 12:00:27 crc kubenswrapper[4783]: I1002 12:00:27.650779 4783 scope.go:117] "RemoveContainer" containerID="2146f14eb6a6abfb50a7316a98ca826f4b670b15c0fb5bf88918d12efc4a5fd2" Oct 02 12:00:27 crc kubenswrapper[4783]: I1002 12:00:27.692614 4783 scope.go:117] "RemoveContainer" containerID="1bf697b0244cbc940f640d08fb862811293c70104b0a74022bd97a50f8fe70b4" Oct 02 12:00:27 crc kubenswrapper[4783]: E1002 12:00:27.693017 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1bf697b0244cbc940f640d08fb862811293c70104b0a74022bd97a50f8fe70b4\": container with ID starting with 1bf697b0244cbc940f640d08fb862811293c70104b0a74022bd97a50f8fe70b4 not found: ID does not exist" containerID="1bf697b0244cbc940f640d08fb862811293c70104b0a74022bd97a50f8fe70b4" Oct 02 12:00:27 crc kubenswrapper[4783]: I1002 12:00:27.693065 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1bf697b0244cbc940f640d08fb862811293c70104b0a74022bd97a50f8fe70b4"} err="failed to get container status \"1bf697b0244cbc940f640d08fb862811293c70104b0a74022bd97a50f8fe70b4\": rpc error: code = NotFound desc = could not find container \"1bf697b0244cbc940f640d08fb862811293c70104b0a74022bd97a50f8fe70b4\": container with ID starting with 1bf697b0244cbc940f640d08fb862811293c70104b0a74022bd97a50f8fe70b4 not found: ID does not exist" Oct 02 12:00:27 crc kubenswrapper[4783]: I1002 12:00:27.693094 4783 scope.go:117] "RemoveContainer" containerID="e4636bdea8b16334099d6cbedecb7ed55121485cd4dd825b058b005bc2500519" Oct 02 12:00:27 crc kubenswrapper[4783]: E1002 12:00:27.693635 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e4636bdea8b16334099d6cbedecb7ed55121485cd4dd825b058b005bc2500519\": container with ID starting with e4636bdea8b16334099d6cbedecb7ed55121485cd4dd825b058b005bc2500519 not found: ID does not exist" containerID="e4636bdea8b16334099d6cbedecb7ed55121485cd4dd825b058b005bc2500519" Oct 02 12:00:27 crc kubenswrapper[4783]: I1002 12:00:27.693768 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e4636bdea8b16334099d6cbedecb7ed55121485cd4dd825b058b005bc2500519"} err="failed to get container status \"e4636bdea8b16334099d6cbedecb7ed55121485cd4dd825b058b005bc2500519\": rpc error: code = NotFound desc = could not find container \"e4636bdea8b16334099d6cbedecb7ed55121485cd4dd825b058b005bc2500519\": container with ID starting with e4636bdea8b16334099d6cbedecb7ed55121485cd4dd825b058b005bc2500519 not found: ID does not exist" Oct 02 12:00:27 crc kubenswrapper[4783]: I1002 12:00:27.693886 4783 scope.go:117] "RemoveContainer" 
containerID="2146f14eb6a6abfb50a7316a98ca826f4b670b15c0fb5bf88918d12efc4a5fd2" Oct 02 12:00:27 crc kubenswrapper[4783]: E1002 12:00:27.694269 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2146f14eb6a6abfb50a7316a98ca826f4b670b15c0fb5bf88918d12efc4a5fd2\": container with ID starting with 2146f14eb6a6abfb50a7316a98ca826f4b670b15c0fb5bf88918d12efc4a5fd2 not found: ID does not exist" containerID="2146f14eb6a6abfb50a7316a98ca826f4b670b15c0fb5bf88918d12efc4a5fd2" Oct 02 12:00:27 crc kubenswrapper[4783]: I1002 12:00:27.694297 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2146f14eb6a6abfb50a7316a98ca826f4b670b15c0fb5bf88918d12efc4a5fd2"} err="failed to get container status \"2146f14eb6a6abfb50a7316a98ca826f4b670b15c0fb5bf88918d12efc4a5fd2\": rpc error: code = NotFound desc = could not find container \"2146f14eb6a6abfb50a7316a98ca826f4b670b15c0fb5bf88918d12efc4a5fd2\": container with ID starting with 2146f14eb6a6abfb50a7316a98ca826f4b670b15c0fb5bf88918d12efc4a5fd2 not found: ID does not exist" Oct 02 12:00:29 crc kubenswrapper[4783]: I1002 12:00:29.574025 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fc83538f-5164-4f06-9db5-53980f5884db" path="/var/lib/kubelet/pods/fc83538f-5164-4f06-9db5-53980f5884db/volumes" Oct 02 12:00:29 crc kubenswrapper[4783]: I1002 12:00:29.600313 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fsw2z" event={"ID":"ac474127-0c18-4b02-bffb-9c141a545df2","Type":"ContainerDied","Data":"02c4cd8374bb90d779890b3fb64483ba5fbaff8969cd8712ce96896ae61d54a8"} Oct 02 12:00:29 crc kubenswrapper[4783]: I1002 12:00:29.600350 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="02c4cd8374bb90d779890b3fb64483ba5fbaff8969cd8712ce96896ae61d54a8" Oct 02 12:00:29 crc kubenswrapper[4783]: I1002 12:00:29.789931 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fsw2z" Oct 02 12:00:29 crc kubenswrapper[4783]: I1002 12:00:29.881736 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/ac474127-0c18-4b02-bffb-9c141a545df2-nova-migration-ssh-key-0\") pod \"ac474127-0c18-4b02-bffb-9c141a545df2\" (UID: \"ac474127-0c18-4b02-bffb-9c141a545df2\") " Oct 02 12:00:29 crc kubenswrapper[4783]: I1002 12:00:29.881788 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ac474127-0c18-4b02-bffb-9c141a545df2-inventory\") pod \"ac474127-0c18-4b02-bffb-9c141a545df2\" (UID: \"ac474127-0c18-4b02-bffb-9c141a545df2\") " Oct 02 12:00:29 crc kubenswrapper[4783]: I1002 12:00:29.881839 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/ac474127-0c18-4b02-bffb-9c141a545df2-nova-extra-config-0\") pod \"ac474127-0c18-4b02-bffb-9c141a545df2\" (UID: \"ac474127-0c18-4b02-bffb-9c141a545df2\") " Oct 02 12:00:29 crc kubenswrapper[4783]: I1002 12:00:29.882104 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/ac474127-0c18-4b02-bffb-9c141a545df2-nova-migration-ssh-key-1\") pod \"ac474127-0c18-4b02-bffb-9c141a545df2\" (UID: \"ac474127-0c18-4b02-bffb-9c141a545df2\") " Oct 02 12:00:29 crc kubenswrapper[4783]: I1002 12:00:29.882141 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9tz8c\" (UniqueName: \"kubernetes.io/projected/ac474127-0c18-4b02-bffb-9c141a545df2-kube-api-access-9tz8c\") pod \"ac474127-0c18-4b02-bffb-9c141a545df2\" (UID: \"ac474127-0c18-4b02-bffb-9c141a545df2\") " Oct 02 12:00:29 crc kubenswrapper[4783]: I1002 12:00:29.882988 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/ac474127-0c18-4b02-bffb-9c141a545df2-nova-cell1-compute-config-1\") pod \"ac474127-0c18-4b02-bffb-9c141a545df2\" (UID: \"ac474127-0c18-4b02-bffb-9c141a545df2\") " Oct 02 12:00:29 crc kubenswrapper[4783]: I1002 12:00:29.883017 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/ac474127-0c18-4b02-bffb-9c141a545df2-nova-cell1-compute-config-0\") pod \"ac474127-0c18-4b02-bffb-9c141a545df2\" (UID: \"ac474127-0c18-4b02-bffb-9c141a545df2\") " Oct 02 12:00:29 crc kubenswrapper[4783]: I1002 12:00:29.883044 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ac474127-0c18-4b02-bffb-9c141a545df2-ssh-key\") pod \"ac474127-0c18-4b02-bffb-9c141a545df2\" (UID: \"ac474127-0c18-4b02-bffb-9c141a545df2\") " Oct 02 12:00:29 crc kubenswrapper[4783]: I1002 12:00:29.883071 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac474127-0c18-4b02-bffb-9c141a545df2-nova-combined-ca-bundle\") pod \"ac474127-0c18-4b02-bffb-9c141a545df2\" (UID: \"ac474127-0c18-4b02-bffb-9c141a545df2\") " Oct 02 12:00:29 crc kubenswrapper[4783]: I1002 12:00:29.887479 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/secret/ac474127-0c18-4b02-bffb-9c141a545df2-nova-combined-ca-bundle" (OuterVolumeSpecName: "nova-combined-ca-bundle") pod "ac474127-0c18-4b02-bffb-9c141a545df2" (UID: "ac474127-0c18-4b02-bffb-9c141a545df2"). InnerVolumeSpecName "nova-combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 12:00:29 crc kubenswrapper[4783]: I1002 12:00:29.900067 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ac474127-0c18-4b02-bffb-9c141a545df2-kube-api-access-9tz8c" (OuterVolumeSpecName: "kube-api-access-9tz8c") pod "ac474127-0c18-4b02-bffb-9c141a545df2" (UID: "ac474127-0c18-4b02-bffb-9c141a545df2"). InnerVolumeSpecName "kube-api-access-9tz8c". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 12:00:29 crc kubenswrapper[4783]: I1002 12:00:29.911044 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ac474127-0c18-4b02-bffb-9c141a545df2-nova-migration-ssh-key-1" (OuterVolumeSpecName: "nova-migration-ssh-key-1") pod "ac474127-0c18-4b02-bffb-9c141a545df2" (UID: "ac474127-0c18-4b02-bffb-9c141a545df2"). InnerVolumeSpecName "nova-migration-ssh-key-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 12:00:29 crc kubenswrapper[4783]: I1002 12:00:29.914505 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ac474127-0c18-4b02-bffb-9c141a545df2-ssh-key" (OuterVolumeSpecName: "ssh-key") pod "ac474127-0c18-4b02-bffb-9c141a545df2" (UID: "ac474127-0c18-4b02-bffb-9c141a545df2"). InnerVolumeSpecName "ssh-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 12:00:29 crc kubenswrapper[4783]: I1002 12:00:29.914854 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ac474127-0c18-4b02-bffb-9c141a545df2-nova-cell1-compute-config-1" (OuterVolumeSpecName: "nova-cell1-compute-config-1") pod "ac474127-0c18-4b02-bffb-9c141a545df2" (UID: "ac474127-0c18-4b02-bffb-9c141a545df2"). InnerVolumeSpecName "nova-cell1-compute-config-1". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 12:00:29 crc kubenswrapper[4783]: I1002 12:00:29.916574 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ac474127-0c18-4b02-bffb-9c141a545df2-nova-cell1-compute-config-0" (OuterVolumeSpecName: "nova-cell1-compute-config-0") pod "ac474127-0c18-4b02-bffb-9c141a545df2" (UID: "ac474127-0c18-4b02-bffb-9c141a545df2"). InnerVolumeSpecName "nova-cell1-compute-config-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 12:00:29 crc kubenswrapper[4783]: I1002 12:00:29.920606 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ac474127-0c18-4b02-bffb-9c141a545df2-inventory" (OuterVolumeSpecName: "inventory") pod "ac474127-0c18-4b02-bffb-9c141a545df2" (UID: "ac474127-0c18-4b02-bffb-9c141a545df2"). InnerVolumeSpecName "inventory". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 12:00:29 crc kubenswrapper[4783]: I1002 12:00:29.921258 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ac474127-0c18-4b02-bffb-9c141a545df2-nova-extra-config-0" (OuterVolumeSpecName: "nova-extra-config-0") pod "ac474127-0c18-4b02-bffb-9c141a545df2" (UID: "ac474127-0c18-4b02-bffb-9c141a545df2"). InnerVolumeSpecName "nova-extra-config-0". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 12:00:29 crc kubenswrapper[4783]: I1002 12:00:29.935478 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ac474127-0c18-4b02-bffb-9c141a545df2-nova-migration-ssh-key-0" (OuterVolumeSpecName: "nova-migration-ssh-key-0") pod "ac474127-0c18-4b02-bffb-9c141a545df2" (UID: "ac474127-0c18-4b02-bffb-9c141a545df2"). InnerVolumeSpecName "nova-migration-ssh-key-0". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 12:00:29 crc kubenswrapper[4783]: I1002 12:00:29.985829 4783 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-1\" (UniqueName: \"kubernetes.io/secret/ac474127-0c18-4b02-bffb-9c141a545df2-nova-migration-ssh-key-1\") on node \"crc\" DevicePath \"\"" Oct 02 12:00:29 crc kubenswrapper[4783]: I1002 12:00:29.985877 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9tz8c\" (UniqueName: \"kubernetes.io/projected/ac474127-0c18-4b02-bffb-9c141a545df2-kube-api-access-9tz8c\") on node \"crc\" DevicePath \"\"" Oct 02 12:00:29 crc kubenswrapper[4783]: I1002 12:00:29.985892 4783 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-1\" (UniqueName: \"kubernetes.io/secret/ac474127-0c18-4b02-bffb-9c141a545df2-nova-cell1-compute-config-1\") on node \"crc\" DevicePath \"\"" Oct 02 12:00:29 crc kubenswrapper[4783]: I1002 12:00:29.985906 4783 reconciler_common.go:293] "Volume detached for volume \"nova-cell1-compute-config-0\" (UniqueName: \"kubernetes.io/secret/ac474127-0c18-4b02-bffb-9c141a545df2-nova-cell1-compute-config-0\") on node \"crc\" DevicePath \"\"" Oct 02 12:00:29 crc kubenswrapper[4783]: I1002 12:00:29.985920 4783 reconciler_common.go:293] "Volume detached for volume \"ssh-key\" (UniqueName: \"kubernetes.io/secret/ac474127-0c18-4b02-bffb-9c141a545df2-ssh-key\") on node \"crc\" DevicePath \"\"" Oct 02 12:00:29 crc kubenswrapper[4783]: I1002 12:00:29.985932 4783 reconciler_common.go:293] "Volume detached for volume \"nova-combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac474127-0c18-4b02-bffb-9c141a545df2-nova-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 12:00:29 crc kubenswrapper[4783]: I1002 12:00:29.985944 4783 reconciler_common.go:293] "Volume detached for volume \"nova-migration-ssh-key-0\" (UniqueName: \"kubernetes.io/secret/ac474127-0c18-4b02-bffb-9c141a545df2-nova-migration-ssh-key-0\") on node \"crc\" DevicePath \"\"" Oct 02 12:00:29 crc kubenswrapper[4783]: I1002 12:00:29.985956 4783 reconciler_common.go:293] "Volume detached for volume \"inventory\" (UniqueName: \"kubernetes.io/secret/ac474127-0c18-4b02-bffb-9c141a545df2-inventory\") on node \"crc\" DevicePath \"\"" Oct 02 12:00:29 crc kubenswrapper[4783]: I1002 12:00:29.985979 4783 reconciler_common.go:293] "Volume detached for volume \"nova-extra-config-0\" (UniqueName: \"kubernetes.io/configmap/ac474127-0c18-4b02-bffb-9c141a545df2-nova-extra-config-0\") on node \"crc\" DevicePath \"\"" Oct 02 12:00:30 crc kubenswrapper[4783]: I1002 12:00:30.610332 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-edpm-deployment-openstack-edpm-ipam-fsw2z" Oct 02 12:00:51 crc kubenswrapper[4783]: I1002 12:00:51.513899 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 02 12:00:51 crc kubenswrapper[4783]: I1002 12:00:51.514475 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 02 12:00:56 crc kubenswrapper[4783]: I1002 12:00:56.878911 4783 scope.go:117] "RemoveContainer" containerID="731599dcf1b27223bb7f1169e21a3d5cfa4d21a91da6ce99cd69266dfc646729" Oct 02 12:00:59 crc kubenswrapper[4783]: I1002 12:00:59.256589 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-fl7qg"] Oct 02 12:00:59 crc kubenswrapper[4783]: E1002 12:00:59.257542 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc83538f-5164-4f06-9db5-53980f5884db" containerName="extract-content" Oct 02 12:00:59 crc kubenswrapper[4783]: I1002 12:00:59.257556 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc83538f-5164-4f06-9db5-53980f5884db" containerName="extract-content" Oct 02 12:00:59 crc kubenswrapper[4783]: E1002 12:00:59.259330 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc83538f-5164-4f06-9db5-53980f5884db" containerName="extract-utilities" Oct 02 12:00:59 crc kubenswrapper[4783]: I1002 12:00:59.259349 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc83538f-5164-4f06-9db5-53980f5884db" containerName="extract-utilities" Oct 02 12:00:59 crc kubenswrapper[4783]: E1002 12:00:59.260100 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fc83538f-5164-4f06-9db5-53980f5884db" containerName="registry-server" Oct 02 12:00:59 crc kubenswrapper[4783]: I1002 12:00:59.260126 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="fc83538f-5164-4f06-9db5-53980f5884db" containerName="registry-server" Oct 02 12:00:59 crc kubenswrapper[4783]: E1002 12:00:59.260162 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac474127-0c18-4b02-bffb-9c141a545df2" containerName="nova-edpm-deployment-openstack-edpm-ipam" Oct 02 12:00:59 crc kubenswrapper[4783]: I1002 12:00:59.260171 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac474127-0c18-4b02-bffb-9c141a545df2" containerName="nova-edpm-deployment-openstack-edpm-ipam" Oct 02 12:00:59 crc kubenswrapper[4783]: I1002 12:00:59.260551 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac474127-0c18-4b02-bffb-9c141a545df2" containerName="nova-edpm-deployment-openstack-edpm-ipam" Oct 02 12:00:59 crc kubenswrapper[4783]: I1002 12:00:59.260574 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="fc83538f-5164-4f06-9db5-53980f5884db" containerName="registry-server" Oct 02 12:00:59 crc kubenswrapper[4783]: I1002 12:00:59.262883 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-fl7qg" Oct 02 12:00:59 crc kubenswrapper[4783]: I1002 12:00:59.278662 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-fl7qg"] Oct 02 12:00:59 crc kubenswrapper[4783]: I1002 12:00:59.345822 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8pj8j\" (UniqueName: \"kubernetes.io/projected/608d352b-b8eb-470f-b5b0-1955e4f5cafb-kube-api-access-8pj8j\") pod \"community-operators-fl7qg\" (UID: \"608d352b-b8eb-470f-b5b0-1955e4f5cafb\") " pod="openshift-marketplace/community-operators-fl7qg" Oct 02 12:00:59 crc kubenswrapper[4783]: I1002 12:00:59.345914 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/608d352b-b8eb-470f-b5b0-1955e4f5cafb-utilities\") pod \"community-operators-fl7qg\" (UID: \"608d352b-b8eb-470f-b5b0-1955e4f5cafb\") " pod="openshift-marketplace/community-operators-fl7qg" Oct 02 12:00:59 crc kubenswrapper[4783]: I1002 12:00:59.346010 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/608d352b-b8eb-470f-b5b0-1955e4f5cafb-catalog-content\") pod \"community-operators-fl7qg\" (UID: \"608d352b-b8eb-470f-b5b0-1955e4f5cafb\") " pod="openshift-marketplace/community-operators-fl7qg" Oct 02 12:00:59 crc kubenswrapper[4783]: I1002 12:00:59.447924 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8pj8j\" (UniqueName: \"kubernetes.io/projected/608d352b-b8eb-470f-b5b0-1955e4f5cafb-kube-api-access-8pj8j\") pod \"community-operators-fl7qg\" (UID: \"608d352b-b8eb-470f-b5b0-1955e4f5cafb\") " pod="openshift-marketplace/community-operators-fl7qg" Oct 02 12:00:59 crc kubenswrapper[4783]: I1002 12:00:59.448386 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/608d352b-b8eb-470f-b5b0-1955e4f5cafb-utilities\") pod \"community-operators-fl7qg\" (UID: \"608d352b-b8eb-470f-b5b0-1955e4f5cafb\") " pod="openshift-marketplace/community-operators-fl7qg" Oct 02 12:00:59 crc kubenswrapper[4783]: I1002 12:00:59.448933 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/608d352b-b8eb-470f-b5b0-1955e4f5cafb-utilities\") pod \"community-operators-fl7qg\" (UID: \"608d352b-b8eb-470f-b5b0-1955e4f5cafb\") " pod="openshift-marketplace/community-operators-fl7qg" Oct 02 12:00:59 crc kubenswrapper[4783]: I1002 12:00:59.449137 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/608d352b-b8eb-470f-b5b0-1955e4f5cafb-catalog-content\") pod \"community-operators-fl7qg\" (UID: \"608d352b-b8eb-470f-b5b0-1955e4f5cafb\") " pod="openshift-marketplace/community-operators-fl7qg" Oct 02 12:00:59 crc kubenswrapper[4783]: I1002 12:00:59.449471 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/608d352b-b8eb-470f-b5b0-1955e4f5cafb-catalog-content\") pod \"community-operators-fl7qg\" (UID: \"608d352b-b8eb-470f-b5b0-1955e4f5cafb\") " pod="openshift-marketplace/community-operators-fl7qg" Oct 02 12:00:59 crc kubenswrapper[4783]: I1002 12:00:59.479221 4783 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-8pj8j\" (UniqueName: \"kubernetes.io/projected/608d352b-b8eb-470f-b5b0-1955e4f5cafb-kube-api-access-8pj8j\") pod \"community-operators-fl7qg\" (UID: \"608d352b-b8eb-470f-b5b0-1955e4f5cafb\") " pod="openshift-marketplace/community-operators-fl7qg" Oct 02 12:00:59 crc kubenswrapper[4783]: I1002 12:00:59.593667 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fl7qg" Oct 02 12:01:00 crc kubenswrapper[4783]: I1002 12:01:00.075904 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-fl7qg"] Oct 02 12:01:00 crc kubenswrapper[4783]: I1002 12:01:00.155869 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-cron-29323441-wrfzv"] Oct 02 12:01:00 crc kubenswrapper[4783]: I1002 12:01:00.157758 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29323441-wrfzv" Oct 02 12:01:00 crc kubenswrapper[4783]: I1002 12:01:00.168346 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29323441-wrfzv"] Oct 02 12:01:00 crc kubenswrapper[4783]: I1002 12:01:00.274313 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c76682e-325d-4531-9afb-fcaed9ed292d-combined-ca-bundle\") pod \"keystone-cron-29323441-wrfzv\" (UID: \"4c76682e-325d-4531-9afb-fcaed9ed292d\") " pod="openstack/keystone-cron-29323441-wrfzv" Oct 02 12:01:00 crc kubenswrapper[4783]: I1002 12:01:00.274705 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4c76682e-325d-4531-9afb-fcaed9ed292d-fernet-keys\") pod \"keystone-cron-29323441-wrfzv\" (UID: \"4c76682e-325d-4531-9afb-fcaed9ed292d\") " pod="openstack/keystone-cron-29323441-wrfzv" Oct 02 12:01:00 crc kubenswrapper[4783]: I1002 12:01:00.274849 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c76682e-325d-4531-9afb-fcaed9ed292d-config-data\") pod \"keystone-cron-29323441-wrfzv\" (UID: \"4c76682e-325d-4531-9afb-fcaed9ed292d\") " pod="openstack/keystone-cron-29323441-wrfzv" Oct 02 12:01:00 crc kubenswrapper[4783]: I1002 12:01:00.274918 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5sjhg\" (UniqueName: \"kubernetes.io/projected/4c76682e-325d-4531-9afb-fcaed9ed292d-kube-api-access-5sjhg\") pod \"keystone-cron-29323441-wrfzv\" (UID: \"4c76682e-325d-4531-9afb-fcaed9ed292d\") " pod="openstack/keystone-cron-29323441-wrfzv" Oct 02 12:01:00 crc kubenswrapper[4783]: I1002 12:01:00.376849 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c76682e-325d-4531-9afb-fcaed9ed292d-config-data\") pod \"keystone-cron-29323441-wrfzv\" (UID: \"4c76682e-325d-4531-9afb-fcaed9ed292d\") " pod="openstack/keystone-cron-29323441-wrfzv" Oct 02 12:01:00 crc kubenswrapper[4783]: I1002 12:01:00.376931 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5sjhg\" (UniqueName: \"kubernetes.io/projected/4c76682e-325d-4531-9afb-fcaed9ed292d-kube-api-access-5sjhg\") pod \"keystone-cron-29323441-wrfzv\" (UID: \"4c76682e-325d-4531-9afb-fcaed9ed292d\") " 
pod="openstack/keystone-cron-29323441-wrfzv" Oct 02 12:01:00 crc kubenswrapper[4783]: I1002 12:01:00.376978 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c76682e-325d-4531-9afb-fcaed9ed292d-combined-ca-bundle\") pod \"keystone-cron-29323441-wrfzv\" (UID: \"4c76682e-325d-4531-9afb-fcaed9ed292d\") " pod="openstack/keystone-cron-29323441-wrfzv" Oct 02 12:01:00 crc kubenswrapper[4783]: I1002 12:01:00.376995 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4c76682e-325d-4531-9afb-fcaed9ed292d-fernet-keys\") pod \"keystone-cron-29323441-wrfzv\" (UID: \"4c76682e-325d-4531-9afb-fcaed9ed292d\") " pod="openstack/keystone-cron-29323441-wrfzv" Oct 02 12:01:00 crc kubenswrapper[4783]: I1002 12:01:00.383515 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c76682e-325d-4531-9afb-fcaed9ed292d-combined-ca-bundle\") pod \"keystone-cron-29323441-wrfzv\" (UID: \"4c76682e-325d-4531-9afb-fcaed9ed292d\") " pod="openstack/keystone-cron-29323441-wrfzv" Oct 02 12:01:00 crc kubenswrapper[4783]: I1002 12:01:00.383710 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4c76682e-325d-4531-9afb-fcaed9ed292d-fernet-keys\") pod \"keystone-cron-29323441-wrfzv\" (UID: \"4c76682e-325d-4531-9afb-fcaed9ed292d\") " pod="openstack/keystone-cron-29323441-wrfzv" Oct 02 12:01:00 crc kubenswrapper[4783]: I1002 12:01:00.384678 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c76682e-325d-4531-9afb-fcaed9ed292d-config-data\") pod \"keystone-cron-29323441-wrfzv\" (UID: \"4c76682e-325d-4531-9afb-fcaed9ed292d\") " pod="openstack/keystone-cron-29323441-wrfzv" Oct 02 12:01:00 crc kubenswrapper[4783]: I1002 12:01:00.395162 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5sjhg\" (UniqueName: \"kubernetes.io/projected/4c76682e-325d-4531-9afb-fcaed9ed292d-kube-api-access-5sjhg\") pod \"keystone-cron-29323441-wrfzv\" (UID: \"4c76682e-325d-4531-9afb-fcaed9ed292d\") " pod="openstack/keystone-cron-29323441-wrfzv" Oct 02 12:01:00 crc kubenswrapper[4783]: I1002 12:01:00.502611 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29323441-wrfzv" Oct 02 12:01:00 crc kubenswrapper[4783]: I1002 12:01:00.884390 4783 generic.go:334] "Generic (PLEG): container finished" podID="608d352b-b8eb-470f-b5b0-1955e4f5cafb" containerID="47ecc7348e32da16dd343fc0dce903cf269e54a03f3f72286c9d8b9b1c3e8b55" exitCode=0 Oct 02 12:01:00 crc kubenswrapper[4783]: I1002 12:01:00.884454 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fl7qg" event={"ID":"608d352b-b8eb-470f-b5b0-1955e4f5cafb","Type":"ContainerDied","Data":"47ecc7348e32da16dd343fc0dce903cf269e54a03f3f72286c9d8b9b1c3e8b55"} Oct 02 12:01:00 crc kubenswrapper[4783]: I1002 12:01:00.884481 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fl7qg" event={"ID":"608d352b-b8eb-470f-b5b0-1955e4f5cafb","Type":"ContainerStarted","Data":"f34a8556e32b84fb88c9012377d9cd64295a593932b68d3324503c7168fae6eb"} Oct 02 12:01:01 crc kubenswrapper[4783]: I1002 12:01:01.371208 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-cron-29323441-wrfzv"] Oct 02 12:01:01 crc kubenswrapper[4783]: I1002 12:01:01.897690 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29323441-wrfzv" event={"ID":"4c76682e-325d-4531-9afb-fcaed9ed292d","Type":"ContainerStarted","Data":"2fda8c61dd9e195ec15122240e3a1137dd5b7fb865e24c9fb801ce7dd2fa6a61"} Oct 02 12:01:01 crc kubenswrapper[4783]: I1002 12:01:01.898073 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29323441-wrfzv" event={"ID":"4c76682e-325d-4531-9afb-fcaed9ed292d","Type":"ContainerStarted","Data":"db6fca993d9284a0175232c2ea8f917471c9d8637789aaca43d44f82ae2ab51c"} Oct 02 12:01:01 crc kubenswrapper[4783]: I1002 12:01:01.929575 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-cron-29323441-wrfzv" podStartSLOduration=1.929547385 podStartE2EDuration="1.929547385s" podCreationTimestamp="2025-10-02 12:01:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 12:01:01.92309722 +0000 UTC m=+4095.239291481" watchObservedRunningTime="2025-10-02 12:01:01.929547385 +0000 UTC m=+4095.245741646" Oct 02 12:01:02 crc kubenswrapper[4783]: I1002 12:01:02.913194 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fl7qg" event={"ID":"608d352b-b8eb-470f-b5b0-1955e4f5cafb","Type":"ContainerStarted","Data":"fc4bf51de5ac9a126ea6eb4bed1c8fdc6a70a0c671eda182f6d9491300f3cad2"} Oct 02 12:01:03 crc kubenswrapper[4783]: I1002 12:01:03.921496 4783 generic.go:334] "Generic (PLEG): container finished" podID="608d352b-b8eb-470f-b5b0-1955e4f5cafb" containerID="fc4bf51de5ac9a126ea6eb4bed1c8fdc6a70a0c671eda182f6d9491300f3cad2" exitCode=0 Oct 02 12:01:03 crc kubenswrapper[4783]: I1002 12:01:03.921543 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fl7qg" event={"ID":"608d352b-b8eb-470f-b5b0-1955e4f5cafb","Type":"ContainerDied","Data":"fc4bf51de5ac9a126ea6eb4bed1c8fdc6a70a0c671eda182f6d9491300f3cad2"} Oct 02 12:01:04 crc kubenswrapper[4783]: I1002 12:01:04.932452 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fl7qg" 
event={"ID":"608d352b-b8eb-470f-b5b0-1955e4f5cafb","Type":"ContainerStarted","Data":"435f71aee1bc1bab91cf07498519ab3dbbb403f037b6d7bd613b0d4126785072"} Oct 02 12:01:05 crc kubenswrapper[4783]: I1002 12:01:05.944399 4783 generic.go:334] "Generic (PLEG): container finished" podID="4c76682e-325d-4531-9afb-fcaed9ed292d" containerID="2fda8c61dd9e195ec15122240e3a1137dd5b7fb865e24c9fb801ce7dd2fa6a61" exitCode=0 Oct 02 12:01:05 crc kubenswrapper[4783]: I1002 12:01:05.944450 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29323441-wrfzv" event={"ID":"4c76682e-325d-4531-9afb-fcaed9ed292d","Type":"ContainerDied","Data":"2fda8c61dd9e195ec15122240e3a1137dd5b7fb865e24c9fb801ce7dd2fa6a61"} Oct 02 12:01:05 crc kubenswrapper[4783]: I1002 12:01:05.964286 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-fl7qg" podStartSLOduration=3.396582262 podStartE2EDuration="6.964269731s" podCreationTimestamp="2025-10-02 12:00:59 +0000 UTC" firstStartedPulling="2025-10-02 12:01:00.888273055 +0000 UTC m=+4094.204467326" lastFinishedPulling="2025-10-02 12:01:04.455960534 +0000 UTC m=+4097.772154795" observedRunningTime="2025-10-02 12:01:04.957214868 +0000 UTC m=+4098.273409129" watchObservedRunningTime="2025-10-02 12:01:05.964269731 +0000 UTC m=+4099.280463982" Oct 02 12:01:07 crc kubenswrapper[4783]: I1002 12:01:07.318656 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-cron-29323441-wrfzv" Oct 02 12:01:07 crc kubenswrapper[4783]: I1002 12:01:07.408118 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c76682e-325d-4531-9afb-fcaed9ed292d-config-data\") pod \"4c76682e-325d-4531-9afb-fcaed9ed292d\" (UID: \"4c76682e-325d-4531-9afb-fcaed9ed292d\") " Oct 02 12:01:07 crc kubenswrapper[4783]: I1002 12:01:07.408231 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4c76682e-325d-4531-9afb-fcaed9ed292d-fernet-keys\") pod \"4c76682e-325d-4531-9afb-fcaed9ed292d\" (UID: \"4c76682e-325d-4531-9afb-fcaed9ed292d\") " Oct 02 12:01:07 crc kubenswrapper[4783]: I1002 12:01:07.408376 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c76682e-325d-4531-9afb-fcaed9ed292d-combined-ca-bundle\") pod \"4c76682e-325d-4531-9afb-fcaed9ed292d\" (UID: \"4c76682e-325d-4531-9afb-fcaed9ed292d\") " Oct 02 12:01:07 crc kubenswrapper[4783]: I1002 12:01:07.408518 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5sjhg\" (UniqueName: \"kubernetes.io/projected/4c76682e-325d-4531-9afb-fcaed9ed292d-kube-api-access-5sjhg\") pod \"4c76682e-325d-4531-9afb-fcaed9ed292d\" (UID: \"4c76682e-325d-4531-9afb-fcaed9ed292d\") " Oct 02 12:01:07 crc kubenswrapper[4783]: I1002 12:01:07.413917 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c76682e-325d-4531-9afb-fcaed9ed292d-kube-api-access-5sjhg" (OuterVolumeSpecName: "kube-api-access-5sjhg") pod "4c76682e-325d-4531-9afb-fcaed9ed292d" (UID: "4c76682e-325d-4531-9afb-fcaed9ed292d"). InnerVolumeSpecName "kube-api-access-5sjhg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 12:01:07 crc kubenswrapper[4783]: I1002 12:01:07.414196 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c76682e-325d-4531-9afb-fcaed9ed292d-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "4c76682e-325d-4531-9afb-fcaed9ed292d" (UID: "4c76682e-325d-4531-9afb-fcaed9ed292d"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 12:01:07 crc kubenswrapper[4783]: I1002 12:01:07.437611 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c76682e-325d-4531-9afb-fcaed9ed292d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4c76682e-325d-4531-9afb-fcaed9ed292d" (UID: "4c76682e-325d-4531-9afb-fcaed9ed292d"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 12:01:07 crc kubenswrapper[4783]: I1002 12:01:07.480562 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c76682e-325d-4531-9afb-fcaed9ed292d-config-data" (OuterVolumeSpecName: "config-data") pod "4c76682e-325d-4531-9afb-fcaed9ed292d" (UID: "4c76682e-325d-4531-9afb-fcaed9ed292d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 12:01:07 crc kubenswrapper[4783]: I1002 12:01:07.510582 4783 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c76682e-325d-4531-9afb-fcaed9ed292d-config-data\") on node \"crc\" DevicePath \"\"" Oct 02 12:01:07 crc kubenswrapper[4783]: I1002 12:01:07.510635 4783 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/4c76682e-325d-4531-9afb-fcaed9ed292d-fernet-keys\") on node \"crc\" DevicePath \"\"" Oct 02 12:01:07 crc kubenswrapper[4783]: I1002 12:01:07.510647 4783 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c76682e-325d-4531-9afb-fcaed9ed292d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Oct 02 12:01:07 crc kubenswrapper[4783]: I1002 12:01:07.510656 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5sjhg\" (UniqueName: \"kubernetes.io/projected/4c76682e-325d-4531-9afb-fcaed9ed292d-kube-api-access-5sjhg\") on node \"crc\" DevicePath \"\"" Oct 02 12:01:07 crc kubenswrapper[4783]: I1002 12:01:07.965024 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-cron-29323441-wrfzv" event={"ID":"4c76682e-325d-4531-9afb-fcaed9ed292d","Type":"ContainerDied","Data":"db6fca993d9284a0175232c2ea8f917471c9d8637789aaca43d44f82ae2ab51c"} Oct 02 12:01:07 crc kubenswrapper[4783]: I1002 12:01:07.965293 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="db6fca993d9284a0175232c2ea8f917471c9d8637789aaca43d44f82ae2ab51c" Oct 02 12:01:07 crc kubenswrapper[4783]: I1002 12:01:07.965125 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-cron-29323441-wrfzv" Oct 02 12:01:09 crc kubenswrapper[4783]: I1002 12:01:09.594519 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-fl7qg" Oct 02 12:01:09 crc kubenswrapper[4783]: I1002 12:01:09.594563 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-fl7qg" Oct 02 12:01:09 crc kubenswrapper[4783]: I1002 12:01:09.654856 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-fl7qg" Oct 02 12:01:10 crc kubenswrapper[4783]: I1002 12:01:10.026102 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-fl7qg" Oct 02 12:01:10 crc kubenswrapper[4783]: I1002 12:01:10.076150 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-fl7qg"] Oct 02 12:01:12 crc kubenswrapper[4783]: I1002 12:01:12.001941 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-fl7qg" podUID="608d352b-b8eb-470f-b5b0-1955e4f5cafb" containerName="registry-server" containerID="cri-o://435f71aee1bc1bab91cf07498519ab3dbbb403f037b6d7bd613b0d4126785072" gracePeriod=2 Oct 02 12:01:12 crc kubenswrapper[4783]: I1002 12:01:12.799378 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-fl7qg" Oct 02 12:01:12 crc kubenswrapper[4783]: I1002 12:01:12.910708 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8pj8j\" (UniqueName: \"kubernetes.io/projected/608d352b-b8eb-470f-b5b0-1955e4f5cafb-kube-api-access-8pj8j\") pod \"608d352b-b8eb-470f-b5b0-1955e4f5cafb\" (UID: \"608d352b-b8eb-470f-b5b0-1955e4f5cafb\") " Oct 02 12:01:12 crc kubenswrapper[4783]: I1002 12:01:12.911108 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/608d352b-b8eb-470f-b5b0-1955e4f5cafb-utilities\") pod \"608d352b-b8eb-470f-b5b0-1955e4f5cafb\" (UID: \"608d352b-b8eb-470f-b5b0-1955e4f5cafb\") " Oct 02 12:01:12 crc kubenswrapper[4783]: I1002 12:01:12.911890 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/608d352b-b8eb-470f-b5b0-1955e4f5cafb-utilities" (OuterVolumeSpecName: "utilities") pod "608d352b-b8eb-470f-b5b0-1955e4f5cafb" (UID: "608d352b-b8eb-470f-b5b0-1955e4f5cafb"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 12:01:12 crc kubenswrapper[4783]: I1002 12:01:12.912085 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/608d352b-b8eb-470f-b5b0-1955e4f5cafb-catalog-content\") pod \"608d352b-b8eb-470f-b5b0-1955e4f5cafb\" (UID: \"608d352b-b8eb-470f-b5b0-1955e4f5cafb\") " Oct 02 12:01:12 crc kubenswrapper[4783]: I1002 12:01:12.913136 4783 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/608d352b-b8eb-470f-b5b0-1955e4f5cafb-utilities\") on node \"crc\" DevicePath \"\"" Oct 02 12:01:12 crc kubenswrapper[4783]: I1002 12:01:12.918328 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/608d352b-b8eb-470f-b5b0-1955e4f5cafb-kube-api-access-8pj8j" (OuterVolumeSpecName: "kube-api-access-8pj8j") pod "608d352b-b8eb-470f-b5b0-1955e4f5cafb" (UID: "608d352b-b8eb-470f-b5b0-1955e4f5cafb"). InnerVolumeSpecName "kube-api-access-8pj8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 12:01:13 crc kubenswrapper[4783]: I1002 12:01:13.014543 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8pj8j\" (UniqueName: \"kubernetes.io/projected/608d352b-b8eb-470f-b5b0-1955e4f5cafb-kube-api-access-8pj8j\") on node \"crc\" DevicePath \"\"" Oct 02 12:01:13 crc kubenswrapper[4783]: I1002 12:01:13.014635 4783 generic.go:334] "Generic (PLEG): container finished" podID="608d352b-b8eb-470f-b5b0-1955e4f5cafb" containerID="435f71aee1bc1bab91cf07498519ab3dbbb403f037b6d7bd613b0d4126785072" exitCode=0 Oct 02 12:01:13 crc kubenswrapper[4783]: I1002 12:01:13.014678 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fl7qg" event={"ID":"608d352b-b8eb-470f-b5b0-1955e4f5cafb","Type":"ContainerDied","Data":"435f71aee1bc1bab91cf07498519ab3dbbb403f037b6d7bd613b0d4126785072"} Oct 02 12:01:13 crc kubenswrapper[4783]: I1002 12:01:13.014705 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-fl7qg" event={"ID":"608d352b-b8eb-470f-b5b0-1955e4f5cafb","Type":"ContainerDied","Data":"f34a8556e32b84fb88c9012377d9cd64295a593932b68d3324503c7168fae6eb"} Oct 02 12:01:13 crc kubenswrapper[4783]: I1002 12:01:13.014722 4783 scope.go:117] "RemoveContainer" containerID="435f71aee1bc1bab91cf07498519ab3dbbb403f037b6d7bd613b0d4126785072" Oct 02 12:01:13 crc kubenswrapper[4783]: I1002 12:01:13.014912 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-fl7qg" Oct 02 12:01:13 crc kubenswrapper[4783]: I1002 12:01:13.034553 4783 scope.go:117] "RemoveContainer" containerID="fc4bf51de5ac9a126ea6eb4bed1c8fdc6a70a0c671eda182f6d9491300f3cad2" Oct 02 12:01:13 crc kubenswrapper[4783]: I1002 12:01:13.052077 4783 scope.go:117] "RemoveContainer" containerID="47ecc7348e32da16dd343fc0dce903cf269e54a03f3f72286c9d8b9b1c3e8b55" Oct 02 12:01:13 crc kubenswrapper[4783]: I1002 12:01:13.096718 4783 scope.go:117] "RemoveContainer" containerID="435f71aee1bc1bab91cf07498519ab3dbbb403f037b6d7bd613b0d4126785072" Oct 02 12:01:13 crc kubenswrapper[4783]: E1002 12:01:13.097097 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"435f71aee1bc1bab91cf07498519ab3dbbb403f037b6d7bd613b0d4126785072\": container with ID starting with 435f71aee1bc1bab91cf07498519ab3dbbb403f037b6d7bd613b0d4126785072 not found: ID does not exist" containerID="435f71aee1bc1bab91cf07498519ab3dbbb403f037b6d7bd613b0d4126785072" Oct 02 12:01:13 crc kubenswrapper[4783]: I1002 12:01:13.097145 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"435f71aee1bc1bab91cf07498519ab3dbbb403f037b6d7bd613b0d4126785072"} err="failed to get container status \"435f71aee1bc1bab91cf07498519ab3dbbb403f037b6d7bd613b0d4126785072\": rpc error: code = NotFound desc = could not find container \"435f71aee1bc1bab91cf07498519ab3dbbb403f037b6d7bd613b0d4126785072\": container with ID starting with 435f71aee1bc1bab91cf07498519ab3dbbb403f037b6d7bd613b0d4126785072 not found: ID does not exist" Oct 02 12:01:13 crc kubenswrapper[4783]: I1002 12:01:13.097178 4783 scope.go:117] "RemoveContainer" containerID="fc4bf51de5ac9a126ea6eb4bed1c8fdc6a70a0c671eda182f6d9491300f3cad2" Oct 02 12:01:13 crc kubenswrapper[4783]: E1002 12:01:13.097702 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fc4bf51de5ac9a126ea6eb4bed1c8fdc6a70a0c671eda182f6d9491300f3cad2\": container with ID starting with fc4bf51de5ac9a126ea6eb4bed1c8fdc6a70a0c671eda182f6d9491300f3cad2 not found: ID does not exist" containerID="fc4bf51de5ac9a126ea6eb4bed1c8fdc6a70a0c671eda182f6d9491300f3cad2" Oct 02 12:01:13 crc kubenswrapper[4783]: I1002 12:01:13.097724 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fc4bf51de5ac9a126ea6eb4bed1c8fdc6a70a0c671eda182f6d9491300f3cad2"} err="failed to get container status \"fc4bf51de5ac9a126ea6eb4bed1c8fdc6a70a0c671eda182f6d9491300f3cad2\": rpc error: code = NotFound desc = could not find container \"fc4bf51de5ac9a126ea6eb4bed1c8fdc6a70a0c671eda182f6d9491300f3cad2\": container with ID starting with fc4bf51de5ac9a126ea6eb4bed1c8fdc6a70a0c671eda182f6d9491300f3cad2 not found: ID does not exist" Oct 02 12:01:13 crc kubenswrapper[4783]: I1002 12:01:13.097738 4783 scope.go:117] "RemoveContainer" containerID="47ecc7348e32da16dd343fc0dce903cf269e54a03f3f72286c9d8b9b1c3e8b55" Oct 02 12:01:13 crc kubenswrapper[4783]: E1002 12:01:13.098198 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"47ecc7348e32da16dd343fc0dce903cf269e54a03f3f72286c9d8b9b1c3e8b55\": container with ID starting with 47ecc7348e32da16dd343fc0dce903cf269e54a03f3f72286c9d8b9b1c3e8b55 not found: ID does not exist" containerID="47ecc7348e32da16dd343fc0dce903cf269e54a03f3f72286c9d8b9b1c3e8b55" 
Oct 02 12:01:13 crc kubenswrapper[4783]: I1002 12:01:13.098231 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"47ecc7348e32da16dd343fc0dce903cf269e54a03f3f72286c9d8b9b1c3e8b55"} err="failed to get container status \"47ecc7348e32da16dd343fc0dce903cf269e54a03f3f72286c9d8b9b1c3e8b55\": rpc error: code = NotFound desc = could not find container \"47ecc7348e32da16dd343fc0dce903cf269e54a03f3f72286c9d8b9b1c3e8b55\": container with ID starting with 47ecc7348e32da16dd343fc0dce903cf269e54a03f3f72286c9d8b9b1c3e8b55 not found: ID does not exist" Oct 02 12:01:13 crc kubenswrapper[4783]: I1002 12:01:13.749465 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/608d352b-b8eb-470f-b5b0-1955e4f5cafb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "608d352b-b8eb-470f-b5b0-1955e4f5cafb" (UID: "608d352b-b8eb-470f-b5b0-1955e4f5cafb"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 12:01:13 crc kubenswrapper[4783]: I1002 12:01:13.829991 4783 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/608d352b-b8eb-470f-b5b0-1955e4f5cafb-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 02 12:01:13 crc kubenswrapper[4783]: I1002 12:01:13.947307 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-fl7qg"] Oct 02 12:01:13 crc kubenswrapper[4783]: I1002 12:01:13.957685 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-fl7qg"] Oct 02 12:01:15 crc kubenswrapper[4783]: I1002 12:01:15.557256 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="608d352b-b8eb-470f-b5b0-1955e4f5cafb" path="/var/lib/kubelet/pods/608d352b-b8eb-470f-b5b0-1955e4f5cafb/volumes" Oct 02 12:01:21 crc kubenswrapper[4783]: I1002 12:01:21.513568 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 02 12:01:21 crc kubenswrapper[4783]: I1002 12:01:21.514989 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 02 12:01:36 crc kubenswrapper[4783]: I1002 12:01:36.896593 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-mmtvs"] Oct 02 12:01:36 crc kubenswrapper[4783]: E1002 12:01:36.897600 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="608d352b-b8eb-470f-b5b0-1955e4f5cafb" containerName="extract-utilities" Oct 02 12:01:36 crc kubenswrapper[4783]: I1002 12:01:36.897618 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="608d352b-b8eb-470f-b5b0-1955e4f5cafb" containerName="extract-utilities" Oct 02 12:01:36 crc kubenswrapper[4783]: E1002 12:01:36.897653 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="608d352b-b8eb-470f-b5b0-1955e4f5cafb" containerName="extract-content" Oct 02 12:01:36 crc kubenswrapper[4783]: I1002 12:01:36.897662 4783 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="608d352b-b8eb-470f-b5b0-1955e4f5cafb" containerName="extract-content" Oct 02 12:01:36 crc kubenswrapper[4783]: E1002 12:01:36.897681 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="608d352b-b8eb-470f-b5b0-1955e4f5cafb" containerName="registry-server" Oct 02 12:01:36 crc kubenswrapper[4783]: I1002 12:01:36.897689 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="608d352b-b8eb-470f-b5b0-1955e4f5cafb" containerName="registry-server" Oct 02 12:01:36 crc kubenswrapper[4783]: E1002 12:01:36.897728 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c76682e-325d-4531-9afb-fcaed9ed292d" containerName="keystone-cron" Oct 02 12:01:36 crc kubenswrapper[4783]: I1002 12:01:36.897736 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c76682e-325d-4531-9afb-fcaed9ed292d" containerName="keystone-cron" Oct 02 12:01:36 crc kubenswrapper[4783]: I1002 12:01:36.897968 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="608d352b-b8eb-470f-b5b0-1955e4f5cafb" containerName="registry-server" Oct 02 12:01:36 crc kubenswrapper[4783]: I1002 12:01:36.897987 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c76682e-325d-4531-9afb-fcaed9ed292d" containerName="keystone-cron" Oct 02 12:01:36 crc kubenswrapper[4783]: I1002 12:01:36.899698 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mmtvs" Oct 02 12:01:36 crc kubenswrapper[4783]: I1002 12:01:36.910065 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mmtvs"] Oct 02 12:01:36 crc kubenswrapper[4783]: I1002 12:01:36.975331 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vjb65\" (UniqueName: \"kubernetes.io/projected/d43431f0-1d11-470a-8e35-b0bd68c4a2e2-kube-api-access-vjb65\") pod \"redhat-marketplace-mmtvs\" (UID: \"d43431f0-1d11-470a-8e35-b0bd68c4a2e2\") " pod="openshift-marketplace/redhat-marketplace-mmtvs" Oct 02 12:01:36 crc kubenswrapper[4783]: I1002 12:01:36.975484 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d43431f0-1d11-470a-8e35-b0bd68c4a2e2-utilities\") pod \"redhat-marketplace-mmtvs\" (UID: \"d43431f0-1d11-470a-8e35-b0bd68c4a2e2\") " pod="openshift-marketplace/redhat-marketplace-mmtvs" Oct 02 12:01:36 crc kubenswrapper[4783]: I1002 12:01:36.975914 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d43431f0-1d11-470a-8e35-b0bd68c4a2e2-catalog-content\") pod \"redhat-marketplace-mmtvs\" (UID: \"d43431f0-1d11-470a-8e35-b0bd68c4a2e2\") " pod="openshift-marketplace/redhat-marketplace-mmtvs" Oct 02 12:01:37 crc kubenswrapper[4783]: I1002 12:01:37.077853 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vjb65\" (UniqueName: \"kubernetes.io/projected/d43431f0-1d11-470a-8e35-b0bd68c4a2e2-kube-api-access-vjb65\") pod \"redhat-marketplace-mmtvs\" (UID: \"d43431f0-1d11-470a-8e35-b0bd68c4a2e2\") " pod="openshift-marketplace/redhat-marketplace-mmtvs" Oct 02 12:01:37 crc kubenswrapper[4783]: I1002 12:01:37.077937 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d43431f0-1d11-470a-8e35-b0bd68c4a2e2-utilities\") pod 
\"redhat-marketplace-mmtvs\" (UID: \"d43431f0-1d11-470a-8e35-b0bd68c4a2e2\") " pod="openshift-marketplace/redhat-marketplace-mmtvs" Oct 02 12:01:37 crc kubenswrapper[4783]: I1002 12:01:37.078146 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d43431f0-1d11-470a-8e35-b0bd68c4a2e2-catalog-content\") pod \"redhat-marketplace-mmtvs\" (UID: \"d43431f0-1d11-470a-8e35-b0bd68c4a2e2\") " pod="openshift-marketplace/redhat-marketplace-mmtvs" Oct 02 12:01:37 crc kubenswrapper[4783]: I1002 12:01:37.078328 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d43431f0-1d11-470a-8e35-b0bd68c4a2e2-utilities\") pod \"redhat-marketplace-mmtvs\" (UID: \"d43431f0-1d11-470a-8e35-b0bd68c4a2e2\") " pod="openshift-marketplace/redhat-marketplace-mmtvs" Oct 02 12:01:37 crc kubenswrapper[4783]: I1002 12:01:37.078904 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d43431f0-1d11-470a-8e35-b0bd68c4a2e2-catalog-content\") pod \"redhat-marketplace-mmtvs\" (UID: \"d43431f0-1d11-470a-8e35-b0bd68c4a2e2\") " pod="openshift-marketplace/redhat-marketplace-mmtvs" Oct 02 12:01:37 crc kubenswrapper[4783]: I1002 12:01:37.098692 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vjb65\" (UniqueName: \"kubernetes.io/projected/d43431f0-1d11-470a-8e35-b0bd68c4a2e2-kube-api-access-vjb65\") pod \"redhat-marketplace-mmtvs\" (UID: \"d43431f0-1d11-470a-8e35-b0bd68c4a2e2\") " pod="openshift-marketplace/redhat-marketplace-mmtvs" Oct 02 12:01:37 crc kubenswrapper[4783]: I1002 12:01:37.239900 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mmtvs"
Oct 02 12:01:37 crc kubenswrapper[4783]: I1002 12:01:37.670898 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mmtvs"]
Oct 02 12:01:38 crc kubenswrapper[4783]: I1002 12:01:38.260024 4783 generic.go:334] "Generic (PLEG): container finished" podID="d43431f0-1d11-470a-8e35-b0bd68c4a2e2" containerID="3f2bcdab1adb0aa48b2291032e0fc8d3aed9cff7108915ff821466c2e6f9cbee" exitCode=0
Oct 02 12:01:38 crc kubenswrapper[4783]: I1002 12:01:38.260080 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mmtvs" event={"ID":"d43431f0-1d11-470a-8e35-b0bd68c4a2e2","Type":"ContainerDied","Data":"3f2bcdab1adb0aa48b2291032e0fc8d3aed9cff7108915ff821466c2e6f9cbee"}
Oct 02 12:01:38 crc kubenswrapper[4783]: I1002 12:01:38.260364 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mmtvs" event={"ID":"d43431f0-1d11-470a-8e35-b0bd68c4a2e2","Type":"ContainerStarted","Data":"71ac56ad4410468fe66523535bef6abba0a87f738bc6efa6e25cf94cc06afebd"}
Oct 02 12:01:39 crc kubenswrapper[4783]: I1002 12:01:39.275885 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mmtvs" event={"ID":"d43431f0-1d11-470a-8e35-b0bd68c4a2e2","Type":"ContainerStarted","Data":"55db9f14430d05419009d0828efa45cbe3019a5b9d8417b8acf658e2d52d824c"}
Oct 02 12:01:40 crc kubenswrapper[4783]: I1002 12:01:40.290042 4783 generic.go:334] "Generic (PLEG): container finished" podID="d43431f0-1d11-470a-8e35-b0bd68c4a2e2" containerID="55db9f14430d05419009d0828efa45cbe3019a5b9d8417b8acf658e2d52d824c" exitCode=0
Oct 02 12:01:40 crc kubenswrapper[4783]: I1002 12:01:40.290244 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mmtvs" event={"ID":"d43431f0-1d11-470a-8e35-b0bd68c4a2e2","Type":"ContainerDied","Data":"55db9f14430d05419009d0828efa45cbe3019a5b9d8417b8acf658e2d52d824c"}
Oct 02 12:01:41 crc kubenswrapper[4783]: I1002 12:01:41.300674 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mmtvs" event={"ID":"d43431f0-1d11-470a-8e35-b0bd68c4a2e2","Type":"ContainerStarted","Data":"d40903cd185b6e0bf84a061d64e23e97a670870a176f33e44706244fb9a8c89c"}
Oct 02 12:01:41 crc kubenswrapper[4783]: I1002 12:01:41.318658 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-mmtvs" podStartSLOduration=2.70250996 podStartE2EDuration="5.318639451s" podCreationTimestamp="2025-10-02 12:01:36 +0000 UTC" firstStartedPulling="2025-10-02 12:01:38.262808474 +0000 UTC m=+4131.579002735" lastFinishedPulling="2025-10-02 12:01:40.878937955 +0000 UTC m=+4134.195132226" observedRunningTime="2025-10-02 12:01:41.316491532 +0000 UTC m=+4134.632685793" watchObservedRunningTime="2025-10-02 12:01:41.318639451 +0000 UTC m=+4134.634833712"
Oct 02 12:01:47 crc kubenswrapper[4783]: I1002 12:01:47.240566 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-mmtvs"
Oct 02 12:01:47 crc kubenswrapper[4783]: I1002 12:01:47.241080 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-mmtvs"
Oct 02 12:01:47 crc kubenswrapper[4783]: I1002 12:01:47.310734 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-mmtvs"
Oct 02 12:01:47 crc kubenswrapper[4783]: I1002 12:01:47.424316 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-mmtvs"
Oct 02 12:01:47 crc kubenswrapper[4783]: I1002 12:01:47.572028 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-mmtvs"]
Oct 02 12:01:49 crc kubenswrapper[4783]: I1002 12:01:49.382959 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-mmtvs" podUID="d43431f0-1d11-470a-8e35-b0bd68c4a2e2" containerName="registry-server" containerID="cri-o://d40903cd185b6e0bf84a061d64e23e97a670870a176f33e44706244fb9a8c89c" gracePeriod=2
Oct 02 12:01:49 crc kubenswrapper[4783]: I1002 12:01:49.838871 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mmtvs"
Oct 02 12:01:49 crc kubenswrapper[4783]: I1002 12:01:49.956110 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vjb65\" (UniqueName: \"kubernetes.io/projected/d43431f0-1d11-470a-8e35-b0bd68c4a2e2-kube-api-access-vjb65\") pod \"d43431f0-1d11-470a-8e35-b0bd68c4a2e2\" (UID: \"d43431f0-1d11-470a-8e35-b0bd68c4a2e2\") "
Oct 02 12:01:49 crc kubenswrapper[4783]: I1002 12:01:49.956339 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d43431f0-1d11-470a-8e35-b0bd68c4a2e2-catalog-content\") pod \"d43431f0-1d11-470a-8e35-b0bd68c4a2e2\" (UID: \"d43431f0-1d11-470a-8e35-b0bd68c4a2e2\") "
Oct 02 12:01:49 crc kubenswrapper[4783]: I1002 12:01:49.956428 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d43431f0-1d11-470a-8e35-b0bd68c4a2e2-utilities\") pod \"d43431f0-1d11-470a-8e35-b0bd68c4a2e2\" (UID: \"d43431f0-1d11-470a-8e35-b0bd68c4a2e2\") "
Oct 02 12:01:49 crc kubenswrapper[4783]: I1002 12:01:49.957308 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d43431f0-1d11-470a-8e35-b0bd68c4a2e2-utilities" (OuterVolumeSpecName: "utilities") pod "d43431f0-1d11-470a-8e35-b0bd68c4a2e2" (UID: "d43431f0-1d11-470a-8e35-b0bd68c4a2e2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 02 12:01:49 crc kubenswrapper[4783]: I1002 12:01:49.961082 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d43431f0-1d11-470a-8e35-b0bd68c4a2e2-kube-api-access-vjb65" (OuterVolumeSpecName: "kube-api-access-vjb65") pod "d43431f0-1d11-470a-8e35-b0bd68c4a2e2" (UID: "d43431f0-1d11-470a-8e35-b0bd68c4a2e2"). InnerVolumeSpecName "kube-api-access-vjb65". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 02 12:01:49 crc kubenswrapper[4783]: I1002 12:01:49.972683 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d43431f0-1d11-470a-8e35-b0bd68c4a2e2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d43431f0-1d11-470a-8e35-b0bd68c4a2e2" (UID: "d43431f0-1d11-470a-8e35-b0bd68c4a2e2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 02 12:01:50 crc kubenswrapper[4783]: I1002 12:01:50.059015 4783 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d43431f0-1d11-470a-8e35-b0bd68c4a2e2-utilities\") on node \"crc\" DevicePath \"\""
Oct 02 12:01:50 crc kubenswrapper[4783]: I1002 12:01:50.059049 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vjb65\" (UniqueName: \"kubernetes.io/projected/d43431f0-1d11-470a-8e35-b0bd68c4a2e2-kube-api-access-vjb65\") on node \"crc\" DevicePath \"\""
Oct 02 12:01:50 crc kubenswrapper[4783]: I1002 12:01:50.059059 4783 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d43431f0-1d11-470a-8e35-b0bd68c4a2e2-catalog-content\") on node \"crc\" DevicePath \"\""
Oct 02 12:01:50 crc kubenswrapper[4783]: I1002 12:01:50.404491 4783 generic.go:334] "Generic (PLEG): container finished" podID="d43431f0-1d11-470a-8e35-b0bd68c4a2e2" containerID="d40903cd185b6e0bf84a061d64e23e97a670870a176f33e44706244fb9a8c89c" exitCode=0
Oct 02 12:01:50 crc kubenswrapper[4783]: I1002 12:01:50.404567 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mmtvs" event={"ID":"d43431f0-1d11-470a-8e35-b0bd68c4a2e2","Type":"ContainerDied","Data":"d40903cd185b6e0bf84a061d64e23e97a670870a176f33e44706244fb9a8c89c"}
Oct 02 12:01:50 crc kubenswrapper[4783]: I1002 12:01:50.404601 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mmtvs" event={"ID":"d43431f0-1d11-470a-8e35-b0bd68c4a2e2","Type":"ContainerDied","Data":"71ac56ad4410468fe66523535bef6abba0a87f738bc6efa6e25cf94cc06afebd"}
Oct 02 12:01:50 crc kubenswrapper[4783]: I1002 12:01:50.404645 4783 scope.go:117] "RemoveContainer" containerID="d40903cd185b6e0bf84a061d64e23e97a670870a176f33e44706244fb9a8c89c"
Oct 02 12:01:50 crc kubenswrapper[4783]: I1002 12:01:50.405319 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mmtvs"
Oct 02 12:01:50 crc kubenswrapper[4783]: I1002 12:01:50.460514 4783 scope.go:117] "RemoveContainer" containerID="55db9f14430d05419009d0828efa45cbe3019a5b9d8417b8acf658e2d52d824c"
Oct 02 12:01:50 crc kubenswrapper[4783]: I1002 12:01:50.466860 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-mmtvs"]
Oct 02 12:01:50 crc kubenswrapper[4783]: I1002 12:01:50.496566 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-mmtvs"]
Oct 02 12:01:50 crc kubenswrapper[4783]: I1002 12:01:50.505174 4783 scope.go:117] "RemoveContainer" containerID="3f2bcdab1adb0aa48b2291032e0fc8d3aed9cff7108915ff821466c2e6f9cbee"
Oct 02 12:01:50 crc kubenswrapper[4783]: I1002 12:01:50.539660 4783 scope.go:117] "RemoveContainer" containerID="d40903cd185b6e0bf84a061d64e23e97a670870a176f33e44706244fb9a8c89c"
Oct 02 12:01:50 crc kubenswrapper[4783]: E1002 12:01:50.540138 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d40903cd185b6e0bf84a061d64e23e97a670870a176f33e44706244fb9a8c89c\": container with ID starting with d40903cd185b6e0bf84a061d64e23e97a670870a176f33e44706244fb9a8c89c not found: ID does not exist" containerID="d40903cd185b6e0bf84a061d64e23e97a670870a176f33e44706244fb9a8c89c"
Oct 02 12:01:50 crc kubenswrapper[4783]: I1002 12:01:50.540168 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d40903cd185b6e0bf84a061d64e23e97a670870a176f33e44706244fb9a8c89c"} err="failed to get container status \"d40903cd185b6e0bf84a061d64e23e97a670870a176f33e44706244fb9a8c89c\": rpc error: code = NotFound desc = could not find container \"d40903cd185b6e0bf84a061d64e23e97a670870a176f33e44706244fb9a8c89c\": container with ID starting with d40903cd185b6e0bf84a061d64e23e97a670870a176f33e44706244fb9a8c89c not found: ID does not exist"
Oct 02 12:01:50 crc kubenswrapper[4783]: I1002 12:01:50.540187 4783 scope.go:117] "RemoveContainer" containerID="55db9f14430d05419009d0828efa45cbe3019a5b9d8417b8acf658e2d52d824c"
Oct 02 12:01:50 crc kubenswrapper[4783]: E1002 12:01:50.540445 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"55db9f14430d05419009d0828efa45cbe3019a5b9d8417b8acf658e2d52d824c\": container with ID starting with 55db9f14430d05419009d0828efa45cbe3019a5b9d8417b8acf658e2d52d824c not found: ID does not exist" containerID="55db9f14430d05419009d0828efa45cbe3019a5b9d8417b8acf658e2d52d824c"
Oct 02 12:01:50 crc kubenswrapper[4783]: I1002 12:01:50.540491 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"55db9f14430d05419009d0828efa45cbe3019a5b9d8417b8acf658e2d52d824c"} err="failed to get container status \"55db9f14430d05419009d0828efa45cbe3019a5b9d8417b8acf658e2d52d824c\": rpc error: code = NotFound desc = could not find container \"55db9f14430d05419009d0828efa45cbe3019a5b9d8417b8acf658e2d52d824c\": container with ID starting with 55db9f14430d05419009d0828efa45cbe3019a5b9d8417b8acf658e2d52d824c not found: ID does not exist"
Oct 02 12:01:50 crc kubenswrapper[4783]: I1002 12:01:50.540505 4783 scope.go:117] "RemoveContainer" containerID="3f2bcdab1adb0aa48b2291032e0fc8d3aed9cff7108915ff821466c2e6f9cbee"
Oct 02 12:01:50 crc kubenswrapper[4783]: E1002 12:01:50.540767 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3f2bcdab1adb0aa48b2291032e0fc8d3aed9cff7108915ff821466c2e6f9cbee\": container with ID starting with 3f2bcdab1adb0aa48b2291032e0fc8d3aed9cff7108915ff821466c2e6f9cbee not found: ID does not exist" containerID="3f2bcdab1adb0aa48b2291032e0fc8d3aed9cff7108915ff821466c2e6f9cbee"
Oct 02 12:01:50 crc kubenswrapper[4783]: I1002 12:01:50.540791 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3f2bcdab1adb0aa48b2291032e0fc8d3aed9cff7108915ff821466c2e6f9cbee"} err="failed to get container status \"3f2bcdab1adb0aa48b2291032e0fc8d3aed9cff7108915ff821466c2e6f9cbee\": rpc error: code = NotFound desc = could not find container \"3f2bcdab1adb0aa48b2291032e0fc8d3aed9cff7108915ff821466c2e6f9cbee\": container with ID starting with 3f2bcdab1adb0aa48b2291032e0fc8d3aed9cff7108915ff821466c2e6f9cbee not found: ID does not exist"
Oct 02 12:01:51 crc kubenswrapper[4783]: I1002 12:01:51.513531 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Oct 02 12:01:51 crc kubenswrapper[4783]: I1002 12:01:51.513819 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 02 12:01:51 crc kubenswrapper[4783]: I1002 12:01:51.513864 4783 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt"
Oct 02 12:01:51 crc kubenswrapper[4783]: I1002 12:01:51.514608 4783 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"aafa172fb7173020190d3f951ea5c261639271a72bfdacce3f5cf23237ef1dd6"} pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Oct 02 12:01:51 crc kubenswrapper[4783]: I1002 12:01:51.514673 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" containerID="cri-o://aafa172fb7173020190d3f951ea5c261639271a72bfdacce3f5cf23237ef1dd6" gracePeriod=600
Oct 02 12:01:51 crc kubenswrapper[4783]: I1002 12:01:51.557618 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d43431f0-1d11-470a-8e35-b0bd68c4a2e2" path="/var/lib/kubelet/pods/d43431f0-1d11-470a-8e35-b0bd68c4a2e2/volumes"
Oct 02 12:01:52 crc kubenswrapper[4783]: I1002 12:01:52.430898 4783 generic.go:334] "Generic (PLEG): container finished" podID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerID="aafa172fb7173020190d3f951ea5c261639271a72bfdacce3f5cf23237ef1dd6" exitCode=0
Oct 02 12:01:52 crc kubenswrapper[4783]: I1002 12:01:52.431043 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" event={"ID":"3288cc82-59a8-408e-8b0e-b5255882b4fb","Type":"ContainerDied","Data":"aafa172fb7173020190d3f951ea5c261639271a72bfdacce3f5cf23237ef1dd6"}
Oct 02 12:01:52 crc kubenswrapper[4783]: I1002 12:01:52.431538 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" event={"ID":"3288cc82-59a8-408e-8b0e-b5255882b4fb","Type":"ContainerStarted","Data":"4afe7d365a301e5fc4251ae47070122472d46a4f9f15c46eb823bb99df9853ce"}
Oct 02 12:01:52 crc kubenswrapper[4783]: I1002 12:01:52.431629 4783 scope.go:117] "RemoveContainer" containerID="7b64efa2db7d6a24365c13a8d7c58ab48d29750790f46655c264b5faf59edaa3"
Oct 02 12:03:51 crc kubenswrapper[4783]: I1002 12:03:51.513616 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Oct 02 12:03:51 crc kubenswrapper[4783]: I1002 12:03:51.514271 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 02 12:04:21 crc kubenswrapper[4783]: I1002 12:04:21.513679 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Oct 02 12:04:21 crc kubenswrapper[4783]: I1002 12:04:21.514304 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 02 12:04:51 crc kubenswrapper[4783]: I1002 12:04:51.513850 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Oct 02 12:04:51 crc kubenswrapper[4783]: I1002 12:04:51.514560 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 02 12:04:51 crc kubenswrapper[4783]: I1002 12:04:51.514644 4783 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt"
Oct 02 12:04:51 crc kubenswrapper[4783]: I1002 12:04:51.515694 4783 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4afe7d365a301e5fc4251ae47070122472d46a4f9f15c46eb823bb99df9853ce"} pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Oct 02 12:04:51 crc kubenswrapper[4783]: I1002 12:04:51.515824 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" containerID="cri-o://4afe7d365a301e5fc4251ae47070122472d46a4f9f15c46eb823bb99df9853ce" gracePeriod=600
Oct 02 12:04:52 crc kubenswrapper[4783]: E1002 12:04:52.247174 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 12:04:52 crc kubenswrapper[4783]: I1002 12:04:52.337076 4783 generic.go:334] "Generic (PLEG): container finished" podID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerID="4afe7d365a301e5fc4251ae47070122472d46a4f9f15c46eb823bb99df9853ce" exitCode=0
Oct 02 12:04:52 crc kubenswrapper[4783]: I1002 12:04:52.337131 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" event={"ID":"3288cc82-59a8-408e-8b0e-b5255882b4fb","Type":"ContainerDied","Data":"4afe7d365a301e5fc4251ae47070122472d46a4f9f15c46eb823bb99df9853ce"}
Oct 02 12:04:52 crc kubenswrapper[4783]: I1002 12:04:52.337169 4783 scope.go:117] "RemoveContainer" containerID="aafa172fb7173020190d3f951ea5c261639271a72bfdacce3f5cf23237ef1dd6"
Oct 02 12:04:52 crc kubenswrapper[4783]: I1002 12:04:52.337928 4783 scope.go:117] "RemoveContainer" containerID="4afe7d365a301e5fc4251ae47070122472d46a4f9f15c46eb823bb99df9853ce"
Oct 02 12:04:52 crc kubenswrapper[4783]: E1002 12:04:52.338217 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 12:05:04 crc kubenswrapper[4783]: I1002 12:05:04.545710 4783 scope.go:117] "RemoveContainer" containerID="4afe7d365a301e5fc4251ae47070122472d46a4f9f15c46eb823bb99df9853ce"
Oct 02 12:05:04 crc kubenswrapper[4783]: E1002 12:05:04.546778 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 12:05:18 crc kubenswrapper[4783]: I1002 12:05:18.545864 4783 scope.go:117] "RemoveContainer" containerID="4afe7d365a301e5fc4251ae47070122472d46a4f9f15c46eb823bb99df9853ce"
Oct 02 12:05:18 crc kubenswrapper[4783]: E1002 12:05:18.546787 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 12:05:33 crc kubenswrapper[4783]: I1002 12:05:33.544624 4783 scope.go:117] "RemoveContainer" containerID="4afe7d365a301e5fc4251ae47070122472d46a4f9f15c46eb823bb99df9853ce"
Oct 02 12:05:33 crc kubenswrapper[4783]: E1002 12:05:33.545567 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 12:05:45 crc kubenswrapper[4783]: I1002 12:05:45.545365 4783 scope.go:117] "RemoveContainer" containerID="4afe7d365a301e5fc4251ae47070122472d46a4f9f15c46eb823bb99df9853ce"
Oct 02 12:05:45 crc kubenswrapper[4783]: E1002 12:05:45.546238 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 12:05:48 crc kubenswrapper[4783]: I1002 12:05:48.802696 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-x7447"]
Oct 02 12:05:48 crc kubenswrapper[4783]: E1002 12:05:48.803643 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d43431f0-1d11-470a-8e35-b0bd68c4a2e2" containerName="registry-server"
Oct 02 12:05:48 crc kubenswrapper[4783]: I1002 12:05:48.803664 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="d43431f0-1d11-470a-8e35-b0bd68c4a2e2" containerName="registry-server"
Oct 02 12:05:48 crc kubenswrapper[4783]: E1002 12:05:48.803682 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d43431f0-1d11-470a-8e35-b0bd68c4a2e2" containerName="extract-utilities"
Oct 02 12:05:48 crc kubenswrapper[4783]: I1002 12:05:48.803691 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="d43431f0-1d11-470a-8e35-b0bd68c4a2e2" containerName="extract-utilities"
Oct 02 12:05:48 crc kubenswrapper[4783]: E1002 12:05:48.803710 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d43431f0-1d11-470a-8e35-b0bd68c4a2e2" containerName="extract-content"
Oct 02 12:05:48 crc kubenswrapper[4783]: I1002 12:05:48.803720 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="d43431f0-1d11-470a-8e35-b0bd68c4a2e2" containerName="extract-content"
Oct 02 12:05:48 crc kubenswrapper[4783]: I1002 12:05:48.803965 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="d43431f0-1d11-470a-8e35-b0bd68c4a2e2" containerName="registry-server"
Oct 02 12:05:48 crc kubenswrapper[4783]: I1002 12:05:48.805935 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-x7447"
Oct 02 12:05:48 crc kubenswrapper[4783]: I1002 12:05:48.828582 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-x7447"]
Oct 02 12:05:48 crc kubenswrapper[4783]: I1002 12:05:48.868812 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e137675e-7c5d-4c5f-9ce5-115379efaf1c-utilities\") pod \"certified-operators-x7447\" (UID: \"e137675e-7c5d-4c5f-9ce5-115379efaf1c\") " pod="openshift-marketplace/certified-operators-x7447"
Oct 02 12:05:48 crc kubenswrapper[4783]: I1002 12:05:48.868940 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cv45h\" (UniqueName: \"kubernetes.io/projected/e137675e-7c5d-4c5f-9ce5-115379efaf1c-kube-api-access-cv45h\") pod \"certified-operators-x7447\" (UID: \"e137675e-7c5d-4c5f-9ce5-115379efaf1c\") " pod="openshift-marketplace/certified-operators-x7447"
Oct 02 12:05:48 crc kubenswrapper[4783]: I1002 12:05:48.869008 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e137675e-7c5d-4c5f-9ce5-115379efaf1c-catalog-content\") pod \"certified-operators-x7447\" (UID: \"e137675e-7c5d-4c5f-9ce5-115379efaf1c\") " pod="openshift-marketplace/certified-operators-x7447"
Oct 02 12:05:48 crc kubenswrapper[4783]: I1002 12:05:48.971034 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cv45h\" (UniqueName: \"kubernetes.io/projected/e137675e-7c5d-4c5f-9ce5-115379efaf1c-kube-api-access-cv45h\") pod \"certified-operators-x7447\" (UID: \"e137675e-7c5d-4c5f-9ce5-115379efaf1c\") " pod="openshift-marketplace/certified-operators-x7447"
Oct 02 12:05:48 crc kubenswrapper[4783]: I1002 12:05:48.971308 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e137675e-7c5d-4c5f-9ce5-115379efaf1c-catalog-content\") pod \"certified-operators-x7447\" (UID: \"e137675e-7c5d-4c5f-9ce5-115379efaf1c\") " pod="openshift-marketplace/certified-operators-x7447"
Oct 02 12:05:48 crc kubenswrapper[4783]: I1002 12:05:48.971467 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e137675e-7c5d-4c5f-9ce5-115379efaf1c-utilities\") pod \"certified-operators-x7447\" (UID: \"e137675e-7c5d-4c5f-9ce5-115379efaf1c\") " pod="openshift-marketplace/certified-operators-x7447"
Oct 02 12:05:48 crc kubenswrapper[4783]: I1002 12:05:48.971945 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e137675e-7c5d-4c5f-9ce5-115379efaf1c-catalog-content\") pod \"certified-operators-x7447\" (UID: \"e137675e-7c5d-4c5f-9ce5-115379efaf1c\") " pod="openshift-marketplace/certified-operators-x7447"
Oct 02 12:05:48 crc kubenswrapper[4783]: I1002 12:05:48.971945 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e137675e-7c5d-4c5f-9ce5-115379efaf1c-utilities\") pod \"certified-operators-x7447\" (UID: \"e137675e-7c5d-4c5f-9ce5-115379efaf1c\") " pod="openshift-marketplace/certified-operators-x7447"
Oct 02 12:05:48 crc kubenswrapper[4783]: I1002 12:05:48.997354 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cv45h\" (UniqueName: \"kubernetes.io/projected/e137675e-7c5d-4c5f-9ce5-115379efaf1c-kube-api-access-cv45h\") pod \"certified-operators-x7447\" (UID: \"e137675e-7c5d-4c5f-9ce5-115379efaf1c\") " pod="openshift-marketplace/certified-operators-x7447"
Oct 02 12:05:49 crc kubenswrapper[4783]: I1002 12:05:49.133629 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-x7447"
Oct 02 12:05:49 crc kubenswrapper[4783]: I1002 12:05:49.674865 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-x7447"]
Oct 02 12:05:49 crc kubenswrapper[4783]: I1002 12:05:49.922629 4783 generic.go:334] "Generic (PLEG): container finished" podID="e137675e-7c5d-4c5f-9ce5-115379efaf1c" containerID="8f76004d0b102ac0e8bf873076b928d34ed2b385282af57ecd927d4f81aee32b" exitCode=0
Oct 02 12:05:49 crc kubenswrapper[4783]: I1002 12:05:49.922668 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x7447" event={"ID":"e137675e-7c5d-4c5f-9ce5-115379efaf1c","Type":"ContainerDied","Data":"8f76004d0b102ac0e8bf873076b928d34ed2b385282af57ecd927d4f81aee32b"}
Oct 02 12:05:49 crc kubenswrapper[4783]: I1002 12:05:49.922697 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x7447" event={"ID":"e137675e-7c5d-4c5f-9ce5-115379efaf1c","Type":"ContainerStarted","Data":"81fae50088726f6680e98b81f70492b40845dc0fbd1dc67f159ba3683080b169"}
Oct 02 12:05:49 crc kubenswrapper[4783]: I1002 12:05:49.924676 4783 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Oct 02 12:05:50 crc kubenswrapper[4783]: I1002 12:05:50.932794 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x7447" event={"ID":"e137675e-7c5d-4c5f-9ce5-115379efaf1c","Type":"ContainerStarted","Data":"066719cc9cb142b0bfd65687e388ee0d9c0d7fc6a76205005966e6cdde29a2da"}
Oct 02 12:05:52 crc kubenswrapper[4783]: I1002 12:05:52.961136 4783 generic.go:334] "Generic (PLEG): container finished" podID="e137675e-7c5d-4c5f-9ce5-115379efaf1c" containerID="066719cc9cb142b0bfd65687e388ee0d9c0d7fc6a76205005966e6cdde29a2da" exitCode=0
Oct 02 12:05:52 crc kubenswrapper[4783]: I1002 12:05:52.961204 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x7447" event={"ID":"e137675e-7c5d-4c5f-9ce5-115379efaf1c","Type":"ContainerDied","Data":"066719cc9cb142b0bfd65687e388ee0d9c0d7fc6a76205005966e6cdde29a2da"}
Oct 02 12:05:53 crc kubenswrapper[4783]: I1002 12:05:53.973306 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x7447" event={"ID":"e137675e-7c5d-4c5f-9ce5-115379efaf1c","Type":"ContainerStarted","Data":"2aeddb561ad27702a06fde65eaeef34ba0c0ec93086b538cbd77d72bafbff29e"}
Oct 02 12:05:54 crc kubenswrapper[4783]: I1002 12:05:54.018537 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-x7447" podStartSLOduration=2.5917754950000003 podStartE2EDuration="6.018513451s" podCreationTimestamp="2025-10-02 12:05:48 +0000 UTC" firstStartedPulling="2025-10-02 12:05:49.924462807 +0000 UTC m=+4383.240657068" lastFinishedPulling="2025-10-02 12:05:53.351200763 +0000 UTC m=+4386.667395024" observedRunningTime="2025-10-02 12:05:53.995304092 +0000 UTC m=+4387.311498363" watchObservedRunningTime="2025-10-02 12:05:54.018513451 +0000 UTC m=+4387.334707722"
Oct 02 12:05:58 crc kubenswrapper[4783]: I1002 12:05:58.546137 4783 scope.go:117] "RemoveContainer" containerID="4afe7d365a301e5fc4251ae47070122472d46a4f9f15c46eb823bb99df9853ce"
Oct 02 12:05:58 crc kubenswrapper[4783]: E1002 12:05:58.546890 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 12:05:59 crc kubenswrapper[4783]: I1002 12:05:59.134049 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-x7447"
Oct 02 12:05:59 crc kubenswrapper[4783]: I1002 12:05:59.134252 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-x7447"
Oct 02 12:05:59 crc kubenswrapper[4783]: I1002 12:05:59.204696 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-x7447"
Oct 02 12:06:00 crc kubenswrapper[4783]: I1002 12:06:00.089228 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-x7447"
Oct 02 12:06:00 crc kubenswrapper[4783]: I1002 12:06:00.140384 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-x7447"]
Oct 02 12:06:02 crc kubenswrapper[4783]: I1002 12:06:02.053681 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-x7447" podUID="e137675e-7c5d-4c5f-9ce5-115379efaf1c" containerName="registry-server" containerID="cri-o://2aeddb561ad27702a06fde65eaeef34ba0c0ec93086b538cbd77d72bafbff29e" gracePeriod=2
Oct 02 12:06:02 crc kubenswrapper[4783]: I1002 12:06:02.555688 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-x7447"
Oct 02 12:06:02 crc kubenswrapper[4783]: I1002 12:06:02.632679 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e137675e-7c5d-4c5f-9ce5-115379efaf1c-utilities\") pod \"e137675e-7c5d-4c5f-9ce5-115379efaf1c\" (UID: \"e137675e-7c5d-4c5f-9ce5-115379efaf1c\") "
Oct 02 12:06:02 crc kubenswrapper[4783]: I1002 12:06:02.632799 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cv45h\" (UniqueName: \"kubernetes.io/projected/e137675e-7c5d-4c5f-9ce5-115379efaf1c-kube-api-access-cv45h\") pod \"e137675e-7c5d-4c5f-9ce5-115379efaf1c\" (UID: \"e137675e-7c5d-4c5f-9ce5-115379efaf1c\") "
Oct 02 12:06:02 crc kubenswrapper[4783]: I1002 12:06:02.633007 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e137675e-7c5d-4c5f-9ce5-115379efaf1c-catalog-content\") pod \"e137675e-7c5d-4c5f-9ce5-115379efaf1c\" (UID: \"e137675e-7c5d-4c5f-9ce5-115379efaf1c\") "
Oct 02 12:06:02 crc kubenswrapper[4783]: I1002 12:06:02.633757 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e137675e-7c5d-4c5f-9ce5-115379efaf1c-utilities" (OuterVolumeSpecName: "utilities") pod "e137675e-7c5d-4c5f-9ce5-115379efaf1c" (UID: "e137675e-7c5d-4c5f-9ce5-115379efaf1c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 02 12:06:02 crc kubenswrapper[4783]: I1002 12:06:02.640231 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e137675e-7c5d-4c5f-9ce5-115379efaf1c-kube-api-access-cv45h" (OuterVolumeSpecName: "kube-api-access-cv45h") pod "e137675e-7c5d-4c5f-9ce5-115379efaf1c" (UID: "e137675e-7c5d-4c5f-9ce5-115379efaf1c"). InnerVolumeSpecName "kube-api-access-cv45h". PluginName "kubernetes.io/projected", VolumeGidValue ""
Oct 02 12:06:02 crc kubenswrapper[4783]: I1002 12:06:02.679182 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e137675e-7c5d-4c5f-9ce5-115379efaf1c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e137675e-7c5d-4c5f-9ce5-115379efaf1c" (UID: "e137675e-7c5d-4c5f-9ce5-115379efaf1c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Oct 02 12:06:02 crc kubenswrapper[4783]: I1002 12:06:02.735046 4783 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e137675e-7c5d-4c5f-9ce5-115379efaf1c-catalog-content\") on node \"crc\" DevicePath \"\""
Oct 02 12:06:02 crc kubenswrapper[4783]: I1002 12:06:02.735102 4783 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e137675e-7c5d-4c5f-9ce5-115379efaf1c-utilities\") on node \"crc\" DevicePath \"\""
Oct 02 12:06:02 crc kubenswrapper[4783]: I1002 12:06:02.735112 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cv45h\" (UniqueName: \"kubernetes.io/projected/e137675e-7c5d-4c5f-9ce5-115379efaf1c-kube-api-access-cv45h\") on node \"crc\" DevicePath \"\""
Oct 02 12:06:03 crc kubenswrapper[4783]: I1002 12:06:03.073258 4783 generic.go:334] "Generic (PLEG): container finished" podID="e137675e-7c5d-4c5f-9ce5-115379efaf1c" containerID="2aeddb561ad27702a06fde65eaeef34ba0c0ec93086b538cbd77d72bafbff29e" exitCode=0
Oct 02 12:06:03 crc kubenswrapper[4783]: I1002 12:06:03.073349 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x7447" event={"ID":"e137675e-7c5d-4c5f-9ce5-115379efaf1c","Type":"ContainerDied","Data":"2aeddb561ad27702a06fde65eaeef34ba0c0ec93086b538cbd77d72bafbff29e"}
Oct 02 12:06:03 crc kubenswrapper[4783]: I1002 12:06:03.073609 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-x7447" event={"ID":"e137675e-7c5d-4c5f-9ce5-115379efaf1c","Type":"ContainerDied","Data":"81fae50088726f6680e98b81f70492b40845dc0fbd1dc67f159ba3683080b169"}
Oct 02 12:06:03 crc kubenswrapper[4783]: I1002 12:06:03.073364 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-x7447"
Oct 02 12:06:03 crc kubenswrapper[4783]: I1002 12:06:03.073626 4783 scope.go:117] "RemoveContainer" containerID="2aeddb561ad27702a06fde65eaeef34ba0c0ec93086b538cbd77d72bafbff29e"
Oct 02 12:06:03 crc kubenswrapper[4783]: I1002 12:06:03.094629 4783 scope.go:117] "RemoveContainer" containerID="066719cc9cb142b0bfd65687e388ee0d9c0d7fc6a76205005966e6cdde29a2da"
Oct 02 12:06:03 crc kubenswrapper[4783]: I1002 12:06:03.125520 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-x7447"]
Oct 02 12:06:03 crc kubenswrapper[4783]: I1002 12:06:03.139605 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-x7447"]
Oct 02 12:06:03 crc kubenswrapper[4783]: I1002 12:06:03.147129 4783 scope.go:117] "RemoveContainer" containerID="8f76004d0b102ac0e8bf873076b928d34ed2b385282af57ecd927d4f81aee32b"
Oct 02 12:06:03 crc kubenswrapper[4783]: I1002 12:06:03.176469 4783 scope.go:117] "RemoveContainer" containerID="2aeddb561ad27702a06fde65eaeef34ba0c0ec93086b538cbd77d72bafbff29e"
Oct 02 12:06:03 crc kubenswrapper[4783]: E1002 12:06:03.176814 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2aeddb561ad27702a06fde65eaeef34ba0c0ec93086b538cbd77d72bafbff29e\": container with ID starting with 2aeddb561ad27702a06fde65eaeef34ba0c0ec93086b538cbd77d72bafbff29e not found: ID does not exist" containerID="2aeddb561ad27702a06fde65eaeef34ba0c0ec93086b538cbd77d72bafbff29e"
Oct 02 12:06:03 crc kubenswrapper[4783]: I1002 12:06:03.176848 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2aeddb561ad27702a06fde65eaeef34ba0c0ec93086b538cbd77d72bafbff29e"} err="failed to get container status \"2aeddb561ad27702a06fde65eaeef34ba0c0ec93086b538cbd77d72bafbff29e\": rpc error: code = NotFound desc = could not find container \"2aeddb561ad27702a06fde65eaeef34ba0c0ec93086b538cbd77d72bafbff29e\": container with ID starting with 2aeddb561ad27702a06fde65eaeef34ba0c0ec93086b538cbd77d72bafbff29e not found: ID does not exist"
Oct 02 12:06:03 crc kubenswrapper[4783]: I1002 12:06:03.176869 4783 scope.go:117] "RemoveContainer" containerID="066719cc9cb142b0bfd65687e388ee0d9c0d7fc6a76205005966e6cdde29a2da"
Oct 02 12:06:03 crc kubenswrapper[4783]: E1002 12:06:03.177085 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"066719cc9cb142b0bfd65687e388ee0d9c0d7fc6a76205005966e6cdde29a2da\": container with ID starting with 066719cc9cb142b0bfd65687e388ee0d9c0d7fc6a76205005966e6cdde29a2da not found: ID does not exist" containerID="066719cc9cb142b0bfd65687e388ee0d9c0d7fc6a76205005966e6cdde29a2da"
Oct 02 12:06:03 crc kubenswrapper[4783]: I1002 12:06:03.177108 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"066719cc9cb142b0bfd65687e388ee0d9c0d7fc6a76205005966e6cdde29a2da"} err="failed to get container status \"066719cc9cb142b0bfd65687e388ee0d9c0d7fc6a76205005966e6cdde29a2da\": rpc error: code = NotFound desc = could not find container \"066719cc9cb142b0bfd65687e388ee0d9c0d7fc6a76205005966e6cdde29a2da\": container with ID starting with 066719cc9cb142b0bfd65687e388ee0d9c0d7fc6a76205005966e6cdde29a2da not found: ID does not exist"
Oct 02 12:06:03 crc kubenswrapper[4783]: I1002 12:06:03.177120 4783 scope.go:117] "RemoveContainer" containerID="8f76004d0b102ac0e8bf873076b928d34ed2b385282af57ecd927d4f81aee32b"
Oct 02 12:06:03 crc kubenswrapper[4783]: E1002 12:06:03.177286 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8f76004d0b102ac0e8bf873076b928d34ed2b385282af57ecd927d4f81aee32b\": container with ID starting with 8f76004d0b102ac0e8bf873076b928d34ed2b385282af57ecd927d4f81aee32b not found: ID does not exist" containerID="8f76004d0b102ac0e8bf873076b928d34ed2b385282af57ecd927d4f81aee32b"
Oct 02 12:06:03 crc kubenswrapper[4783]: I1002 12:06:03.177310 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8f76004d0b102ac0e8bf873076b928d34ed2b385282af57ecd927d4f81aee32b"} err="failed to get container status \"8f76004d0b102ac0e8bf873076b928d34ed2b385282af57ecd927d4f81aee32b\": rpc error: code = NotFound desc = could not find container \"8f76004d0b102ac0e8bf873076b928d34ed2b385282af57ecd927d4f81aee32b\": container with ID starting with 8f76004d0b102ac0e8bf873076b928d34ed2b385282af57ecd927d4f81aee32b not found: ID does not exist"
Oct 02 12:06:03 crc kubenswrapper[4783]: I1002 12:06:03.563875 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e137675e-7c5d-4c5f-9ce5-115379efaf1c" path="/var/lib/kubelet/pods/e137675e-7c5d-4c5f-9ce5-115379efaf1c/volumes"
Oct 02 12:06:12 crc kubenswrapper[4783]: I1002 12:06:12.545498 4783 scope.go:117] "RemoveContainer" containerID="4afe7d365a301e5fc4251ae47070122472d46a4f9f15c46eb823bb99df9853ce"
Oct 02 12:06:12 crc kubenswrapper[4783]: E1002 12:06:12.546982 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 12:06:26 crc kubenswrapper[4783]: I1002 12:06:26.545129 4783 scope.go:117] "RemoveContainer" containerID="4afe7d365a301e5fc4251ae47070122472d46a4f9f15c46eb823bb99df9853ce"
Oct 02 12:06:26 crc kubenswrapper[4783]: E1002 12:06:26.546094 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 12:06:41 crc kubenswrapper[4783]: I1002 12:06:41.544640 4783 scope.go:117] "RemoveContainer" containerID="4afe7d365a301e5fc4251ae47070122472d46a4f9f15c46eb823bb99df9853ce"
Oct 02 12:06:41 crc kubenswrapper[4783]: E1002 12:06:41.545368 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 12:06:52 crc kubenswrapper[4783]: I1002 12:06:52.559098 4783 scope.go:117] "RemoveContainer" containerID="4afe7d365a301e5fc4251ae47070122472d46a4f9f15c46eb823bb99df9853ce"
Oct 02 12:06:52 crc kubenswrapper[4783]: E1002 12:06:52.573285 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 12:07:05 crc kubenswrapper[4783]: I1002 12:07:05.544967 4783 scope.go:117] "RemoveContainer" containerID="4afe7d365a301e5fc4251ae47070122472d46a4f9f15c46eb823bb99df9853ce"
Oct 02 12:07:05 crc kubenswrapper[4783]: E1002 12:07:05.546173 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 12:07:18 crc kubenswrapper[4783]: I1002 12:07:18.549350 4783 scope.go:117] "RemoveContainer" containerID="4afe7d365a301e5fc4251ae47070122472d46a4f9f15c46eb823bb99df9853ce"
Oct 02 12:07:18 crc kubenswrapper[4783]: E1002 12:07:18.551038 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 12:07:30 crc kubenswrapper[4783]: I1002 12:07:30.545520 4783 scope.go:117] "RemoveContainer" containerID="4afe7d365a301e5fc4251ae47070122472d46a4f9f15c46eb823bb99df9853ce"
Oct 02 12:07:30 crc kubenswrapper[4783]: E1002 12:07:30.546537 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 12:07:44 crc kubenswrapper[4783]: I1002 12:07:44.548456 4783 scope.go:117] "RemoveContainer" containerID="4afe7d365a301e5fc4251ae47070122472d46a4f9f15c46eb823bb99df9853ce"
Oct 02 12:07:44 crc kubenswrapper[4783]: E1002 12:07:44.550646 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 12:07:55 crc kubenswrapper[4783]: I1002 12:07:55.544934 4783 scope.go:117] "RemoveContainer" containerID="4afe7d365a301e5fc4251ae47070122472d46a4f9f15c46eb823bb99df9853ce"
Oct 02 12:07:55 crc kubenswrapper[4783]: E1002 12:07:55.545718 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 12:08:06 crc kubenswrapper[4783]: I1002 12:08:06.545339 4783 scope.go:117] "RemoveContainer" containerID="4afe7d365a301e5fc4251ae47070122472d46a4f9f15c46eb823bb99df9853ce"
Oct 02 12:08:06 crc kubenswrapper[4783]: E1002 12:08:06.546549 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 12:08:18 crc kubenswrapper[4783]: I1002 12:08:18.545040 4783 scope.go:117] "RemoveContainer" containerID="4afe7d365a301e5fc4251ae47070122472d46a4f9f15c46eb823bb99df9853ce"
Oct 02 12:08:18 crc kubenswrapper[4783]: E1002 12:08:18.545793 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 12:08:33 crc kubenswrapper[4783]: I1002 12:08:33.545355 4783 scope.go:117] "RemoveContainer" containerID="4afe7d365a301e5fc4251ae47070122472d46a4f9f15c46eb823bb99df9853ce"
Oct 02 12:08:33 crc kubenswrapper[4783]: E1002 12:08:33.546627 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 12:08:46 crc kubenswrapper[4783]: I1002 12:08:46.545578 4783 scope.go:117] "RemoveContainer" containerID="4afe7d365a301e5fc4251ae47070122472d46a4f9f15c46eb823bb99df9853ce"
Oct 02 12:08:46 crc kubenswrapper[4783]: E1002 12:08:46.546621 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 12:09:00 crc kubenswrapper[4783]: I1002 12:09:00.545753 4783 scope.go:117] "RemoveContainer" containerID="4afe7d365a301e5fc4251ae47070122472d46a4f9f15c46eb823bb99df9853ce"
Oct 02 12:09:00 crc kubenswrapper[4783]: E1002 12:09:00.547151 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 12:09:12 crc kubenswrapper[4783]: I1002 12:09:12.545335 4783 scope.go:117] "RemoveContainer" containerID="4afe7d365a301e5fc4251ae47070122472d46a4f9f15c46eb823bb99df9853ce"
Oct 02 12:09:12 crc kubenswrapper[4783]: E1002 12:09:12.547317 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 12:09:24 crc kubenswrapper[4783]: I1002 12:09:24.544964 4783 scope.go:117] "RemoveContainer" containerID="4afe7d365a301e5fc4251ae47070122472d46a4f9f15c46eb823bb99df9853ce"
Oct 02 12:09:24 crc kubenswrapper[4783]: E1002 12:09:24.545783 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 12:09:35 crc kubenswrapper[4783]: I1002 12:09:35.545588 4783 scope.go:117] "RemoveContainer" containerID="4afe7d365a301e5fc4251ae47070122472d46a4f9f15c46eb823bb99df9853ce"
Oct 02 12:09:35 crc kubenswrapper[4783]: E1002 12:09:35.547082 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 12:09:50 crc kubenswrapper[4783]: I1002 12:09:50.545118 4783 scope.go:117] "RemoveContainer" containerID="4afe7d365a301e5fc4251ae47070122472d46a4f9f15c46eb823bb99df9853ce"
Oct 02 12:09:50 crc kubenswrapper[4783]: E1002 12:09:50.546123 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 12:10:01 crc kubenswrapper[4783]: I1002 12:10:01.545856 4783 scope.go:117] "RemoveContainer" containerID="4afe7d365a301e5fc4251ae47070122472d46a4f9f15c46eb823bb99df9853ce"
Oct 02 12:10:02 crc kubenswrapper[4783]: I1002 12:10:02.632318 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" event={"ID":"3288cc82-59a8-408e-8b0e-b5255882b4fb","Type":"ContainerStarted","Data":"31c034a19e50d094a581a0f0df605c3e00aa8dd41f6fc7b1675a23f1b1bd8cff"}
Oct 02 12:10:24 crc kubenswrapper[4783]: I1002 12:10:24.816574 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-gqllj"]
pods=["openshift-marketplace/redhat-operators-gqllj"] Oct 02 12:10:24 crc kubenswrapper[4783]: E1002 12:10:24.822473 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e137675e-7c5d-4c5f-9ce5-115379efaf1c" containerName="extract-utilities" Oct 02 12:10:24 crc kubenswrapper[4783]: I1002 12:10:24.822533 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="e137675e-7c5d-4c5f-9ce5-115379efaf1c" containerName="extract-utilities" Oct 02 12:10:24 crc kubenswrapper[4783]: E1002 12:10:24.822553 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e137675e-7c5d-4c5f-9ce5-115379efaf1c" containerName="registry-server" Oct 02 12:10:24 crc kubenswrapper[4783]: I1002 12:10:24.822561 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="e137675e-7c5d-4c5f-9ce5-115379efaf1c" containerName="registry-server" Oct 02 12:10:24 crc kubenswrapper[4783]: E1002 12:10:24.822575 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e137675e-7c5d-4c5f-9ce5-115379efaf1c" containerName="extract-content" Oct 02 12:10:24 crc kubenswrapper[4783]: I1002 12:10:24.822602 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="e137675e-7c5d-4c5f-9ce5-115379efaf1c" containerName="extract-content" Oct 02 12:10:24 crc kubenswrapper[4783]: I1002 12:10:24.822855 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="e137675e-7c5d-4c5f-9ce5-115379efaf1c" containerName="registry-server" Oct 02 12:10:24 crc kubenswrapper[4783]: I1002 12:10:24.827687 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-gqllj" Oct 02 12:10:24 crc kubenswrapper[4783]: I1002 12:10:24.837568 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gqllj"] Oct 02 12:10:24 crc kubenswrapper[4783]: I1002 12:10:24.936509 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/03f0a97e-4346-482c-a269-1b70030ac48d-utilities\") pod \"redhat-operators-gqllj\" (UID: \"03f0a97e-4346-482c-a269-1b70030ac48d\") " pod="openshift-marketplace/redhat-operators-gqllj" Oct 02 12:10:24 crc kubenswrapper[4783]: I1002 12:10:24.936565 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s2f68\" (UniqueName: \"kubernetes.io/projected/03f0a97e-4346-482c-a269-1b70030ac48d-kube-api-access-s2f68\") pod \"redhat-operators-gqllj\" (UID: \"03f0a97e-4346-482c-a269-1b70030ac48d\") " pod="openshift-marketplace/redhat-operators-gqllj" Oct 02 12:10:24 crc kubenswrapper[4783]: I1002 12:10:24.936629 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/03f0a97e-4346-482c-a269-1b70030ac48d-catalog-content\") pod \"redhat-operators-gqllj\" (UID: \"03f0a97e-4346-482c-a269-1b70030ac48d\") " pod="openshift-marketplace/redhat-operators-gqllj" Oct 02 12:10:25 crc kubenswrapper[4783]: I1002 12:10:25.038371 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/03f0a97e-4346-482c-a269-1b70030ac48d-catalog-content\") pod \"redhat-operators-gqllj\" (UID: \"03f0a97e-4346-482c-a269-1b70030ac48d\") " pod="openshift-marketplace/redhat-operators-gqllj" Oct 02 12:10:25 crc kubenswrapper[4783]: I1002 12:10:25.038567 4783 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/03f0a97e-4346-482c-a269-1b70030ac48d-utilities\") pod \"redhat-operators-gqllj\" (UID: \"03f0a97e-4346-482c-a269-1b70030ac48d\") " pod="openshift-marketplace/redhat-operators-gqllj" Oct 02 12:10:25 crc kubenswrapper[4783]: I1002 12:10:25.038641 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2f68\" (UniqueName: \"kubernetes.io/projected/03f0a97e-4346-482c-a269-1b70030ac48d-kube-api-access-s2f68\") pod \"redhat-operators-gqllj\" (UID: \"03f0a97e-4346-482c-a269-1b70030ac48d\") " pod="openshift-marketplace/redhat-operators-gqllj" Oct 02 12:10:25 crc kubenswrapper[4783]: I1002 12:10:25.039047 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/03f0a97e-4346-482c-a269-1b70030ac48d-catalog-content\") pod \"redhat-operators-gqllj\" (UID: \"03f0a97e-4346-482c-a269-1b70030ac48d\") " pod="openshift-marketplace/redhat-operators-gqllj" Oct 02 12:10:25 crc kubenswrapper[4783]: I1002 12:10:25.039064 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/03f0a97e-4346-482c-a269-1b70030ac48d-utilities\") pod \"redhat-operators-gqllj\" (UID: \"03f0a97e-4346-482c-a269-1b70030ac48d\") " pod="openshift-marketplace/redhat-operators-gqllj" Oct 02 12:10:25 crc kubenswrapper[4783]: I1002 12:10:25.061387 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2f68\" (UniqueName: \"kubernetes.io/projected/03f0a97e-4346-482c-a269-1b70030ac48d-kube-api-access-s2f68\") pod \"redhat-operators-gqllj\" (UID: \"03f0a97e-4346-482c-a269-1b70030ac48d\") " pod="openshift-marketplace/redhat-operators-gqllj" Oct 02 12:10:25 crc kubenswrapper[4783]: I1002 12:10:25.149692 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-gqllj" Oct 02 12:10:25 crc kubenswrapper[4783]: I1002 12:10:25.448183 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-gqllj"] Oct 02 12:10:25 crc kubenswrapper[4783]: I1002 12:10:25.888884 4783 generic.go:334] "Generic (PLEG): container finished" podID="03f0a97e-4346-482c-a269-1b70030ac48d" containerID="88996f376f5089e7069830d09f9cec8eb6a520239c5af6eaeab6041f85720cb8" exitCode=0 Oct 02 12:10:25 crc kubenswrapper[4783]: I1002 12:10:25.889047 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gqllj" event={"ID":"03f0a97e-4346-482c-a269-1b70030ac48d","Type":"ContainerDied","Data":"88996f376f5089e7069830d09f9cec8eb6a520239c5af6eaeab6041f85720cb8"} Oct 02 12:10:25 crc kubenswrapper[4783]: I1002 12:10:25.889206 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gqllj" event={"ID":"03f0a97e-4346-482c-a269-1b70030ac48d","Type":"ContainerStarted","Data":"8b556889d19508a2e71e5083098b56fd7038593314adf33796449046b845535a"} Oct 02 12:10:27 crc kubenswrapper[4783]: I1002 12:10:27.908999 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gqllj" event={"ID":"03f0a97e-4346-482c-a269-1b70030ac48d","Type":"ContainerStarted","Data":"a324bd3265e57c298602b44427ef7de53ec2840671abf8087fde27bf2bdc3b2e"} Oct 02 12:10:30 crc kubenswrapper[4783]: I1002 12:10:30.938731 4783 generic.go:334] "Generic (PLEG): container finished" podID="03f0a97e-4346-482c-a269-1b70030ac48d" containerID="a324bd3265e57c298602b44427ef7de53ec2840671abf8087fde27bf2bdc3b2e" exitCode=0 Oct 02 12:10:30 crc kubenswrapper[4783]: I1002 12:10:30.938813 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gqllj" event={"ID":"03f0a97e-4346-482c-a269-1b70030ac48d","Type":"ContainerDied","Data":"a324bd3265e57c298602b44427ef7de53ec2840671abf8087fde27bf2bdc3b2e"} Oct 02 12:10:31 crc kubenswrapper[4783]: I1002 12:10:31.949247 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gqllj" event={"ID":"03f0a97e-4346-482c-a269-1b70030ac48d","Type":"ContainerStarted","Data":"ab38f89326c5f893f452b48d257487f4da87954ef33e38a356c3a938b9d3d5d6"} Oct 02 12:10:31 crc kubenswrapper[4783]: I1002 12:10:31.975395 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-gqllj" podStartSLOduration=2.483134058 podStartE2EDuration="7.975373583s" podCreationTimestamp="2025-10-02 12:10:24 +0000 UTC" firstStartedPulling="2025-10-02 12:10:25.890766163 +0000 UTC m=+4659.206960424" lastFinishedPulling="2025-10-02 12:10:31.383005688 +0000 UTC m=+4664.699199949" observedRunningTime="2025-10-02 12:10:31.96754107 +0000 UTC m=+4665.283735341" watchObservedRunningTime="2025-10-02 12:10:31.975373583 +0000 UTC m=+4665.291567844" Oct 02 12:10:35 crc kubenswrapper[4783]: I1002 12:10:35.150041 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-gqllj" Oct 02 12:10:35 crc kubenswrapper[4783]: I1002 12:10:35.150678 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-gqllj" Oct 02 12:10:36 crc kubenswrapper[4783]: I1002 12:10:36.209189 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-gqllj" 
podUID="03f0a97e-4346-482c-a269-1b70030ac48d" containerName="registry-server" probeResult="failure" output=< Oct 02 12:10:36 crc kubenswrapper[4783]: timeout: failed to connect service ":50051" within 1s Oct 02 12:10:36 crc kubenswrapper[4783]: > Oct 02 12:10:45 crc kubenswrapper[4783]: I1002 12:10:45.212522 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-gqllj" Oct 02 12:10:45 crc kubenswrapper[4783]: I1002 12:10:45.295175 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-gqllj" Oct 02 12:10:45 crc kubenswrapper[4783]: I1002 12:10:45.470301 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-gqllj"] Oct 02 12:10:47 crc kubenswrapper[4783]: I1002 12:10:47.120453 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-gqllj" podUID="03f0a97e-4346-482c-a269-1b70030ac48d" containerName="registry-server" containerID="cri-o://ab38f89326c5f893f452b48d257487f4da87954ef33e38a356c3a938b9d3d5d6" gracePeriod=2 Oct 02 12:10:48 crc kubenswrapper[4783]: I1002 12:10:48.144335 4783 generic.go:334] "Generic (PLEG): container finished" podID="03f0a97e-4346-482c-a269-1b70030ac48d" containerID="ab38f89326c5f893f452b48d257487f4da87954ef33e38a356c3a938b9d3d5d6" exitCode=0 Oct 02 12:10:48 crc kubenswrapper[4783]: I1002 12:10:48.144452 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gqllj" event={"ID":"03f0a97e-4346-482c-a269-1b70030ac48d","Type":"ContainerDied","Data":"ab38f89326c5f893f452b48d257487f4da87954ef33e38a356c3a938b9d3d5d6"} Oct 02 12:10:48 crc kubenswrapper[4783]: I1002 12:10:48.144689 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-gqllj" event={"ID":"03f0a97e-4346-482c-a269-1b70030ac48d","Type":"ContainerDied","Data":"8b556889d19508a2e71e5083098b56fd7038593314adf33796449046b845535a"} Oct 02 12:10:48 crc kubenswrapper[4783]: I1002 12:10:48.144705 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8b556889d19508a2e71e5083098b56fd7038593314adf33796449046b845535a" Oct 02 12:10:48 crc kubenswrapper[4783]: I1002 12:10:48.209670 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-gqllj" Oct 02 12:10:48 crc kubenswrapper[4783]: I1002 12:10:48.284933 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s2f68\" (UniqueName: \"kubernetes.io/projected/03f0a97e-4346-482c-a269-1b70030ac48d-kube-api-access-s2f68\") pod \"03f0a97e-4346-482c-a269-1b70030ac48d\" (UID: \"03f0a97e-4346-482c-a269-1b70030ac48d\") " Oct 02 12:10:48 crc kubenswrapper[4783]: I1002 12:10:48.285092 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/03f0a97e-4346-482c-a269-1b70030ac48d-utilities\") pod \"03f0a97e-4346-482c-a269-1b70030ac48d\" (UID: \"03f0a97e-4346-482c-a269-1b70030ac48d\") " Oct 02 12:10:48 crc kubenswrapper[4783]: I1002 12:10:48.285249 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/03f0a97e-4346-482c-a269-1b70030ac48d-catalog-content\") pod \"03f0a97e-4346-482c-a269-1b70030ac48d\" (UID: \"03f0a97e-4346-482c-a269-1b70030ac48d\") " Oct 02 12:10:48 crc kubenswrapper[4783]: I1002 12:10:48.285885 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/03f0a97e-4346-482c-a269-1b70030ac48d-utilities" (OuterVolumeSpecName: "utilities") pod "03f0a97e-4346-482c-a269-1b70030ac48d" (UID: "03f0a97e-4346-482c-a269-1b70030ac48d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 12:10:48 crc kubenswrapper[4783]: I1002 12:10:48.289495 4783 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/03f0a97e-4346-482c-a269-1b70030ac48d-utilities\") on node \"crc\" DevicePath \"\"" Oct 02 12:10:48 crc kubenswrapper[4783]: I1002 12:10:48.304115 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/03f0a97e-4346-482c-a269-1b70030ac48d-kube-api-access-s2f68" (OuterVolumeSpecName: "kube-api-access-s2f68") pod "03f0a97e-4346-482c-a269-1b70030ac48d" (UID: "03f0a97e-4346-482c-a269-1b70030ac48d"). InnerVolumeSpecName "kube-api-access-s2f68". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 12:10:48 crc kubenswrapper[4783]: I1002 12:10:48.374344 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/03f0a97e-4346-482c-a269-1b70030ac48d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "03f0a97e-4346-482c-a269-1b70030ac48d" (UID: "03f0a97e-4346-482c-a269-1b70030ac48d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 12:10:48 crc kubenswrapper[4783]: I1002 12:10:48.390995 4783 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/03f0a97e-4346-482c-a269-1b70030ac48d-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 02 12:10:48 crc kubenswrapper[4783]: I1002 12:10:48.391136 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s2f68\" (UniqueName: \"kubernetes.io/projected/03f0a97e-4346-482c-a269-1b70030ac48d-kube-api-access-s2f68\") on node \"crc\" DevicePath \"\"" Oct 02 12:10:49 crc kubenswrapper[4783]: I1002 12:10:49.158653 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-gqllj" Oct 02 12:10:49 crc kubenswrapper[4783]: I1002 12:10:49.219456 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-gqllj"] Oct 02 12:10:49 crc kubenswrapper[4783]: I1002 12:10:49.234072 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-gqllj"] Oct 02 12:10:49 crc kubenswrapper[4783]: I1002 12:10:49.564389 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="03f0a97e-4346-482c-a269-1b70030ac48d" path="/var/lib/kubelet/pods/03f0a97e-4346-482c-a269-1b70030ac48d/volumes" Oct 02 12:11:47 crc kubenswrapper[4783]: I1002 12:11:47.746861 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-tmb69"] Oct 02 12:11:47 crc kubenswrapper[4783]: E1002 12:11:47.748384 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03f0a97e-4346-482c-a269-1b70030ac48d" containerName="registry-server" Oct 02 12:11:47 crc kubenswrapper[4783]: I1002 12:11:47.748408 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="03f0a97e-4346-482c-a269-1b70030ac48d" containerName="registry-server" Oct 02 12:11:47 crc kubenswrapper[4783]: E1002 12:11:47.748514 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03f0a97e-4346-482c-a269-1b70030ac48d" containerName="extract-content" Oct 02 12:11:47 crc kubenswrapper[4783]: I1002 12:11:47.748524 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="03f0a97e-4346-482c-a269-1b70030ac48d" containerName="extract-content" Oct 02 12:11:47 crc kubenswrapper[4783]: E1002 12:11:47.748558 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="03f0a97e-4346-482c-a269-1b70030ac48d" containerName="extract-utilities" Oct 02 12:11:47 crc kubenswrapper[4783]: I1002 12:11:47.748566 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="03f0a97e-4346-482c-a269-1b70030ac48d" containerName="extract-utilities" Oct 02 12:11:47 crc kubenswrapper[4783]: I1002 12:11:47.749003 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="03f0a97e-4346-482c-a269-1b70030ac48d" containerName="registry-server" Oct 02 12:11:47 crc kubenswrapper[4783]: I1002 12:11:47.763695 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-tmb69" Oct 02 12:11:47 crc kubenswrapper[4783]: I1002 12:11:47.767712 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-tmb69"] Oct 02 12:11:47 crc kubenswrapper[4783]: I1002 12:11:47.862885 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d4851f75-ec62-4979-b48f-ca679b3db80b-catalog-content\") pod \"community-operators-tmb69\" (UID: \"d4851f75-ec62-4979-b48f-ca679b3db80b\") " pod="openshift-marketplace/community-operators-tmb69" Oct 02 12:11:47 crc kubenswrapper[4783]: I1002 12:11:47.863239 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-swsz9\" (UniqueName: \"kubernetes.io/projected/d4851f75-ec62-4979-b48f-ca679b3db80b-kube-api-access-swsz9\") pod \"community-operators-tmb69\" (UID: \"d4851f75-ec62-4979-b48f-ca679b3db80b\") " pod="openshift-marketplace/community-operators-tmb69" Oct 02 12:11:47 crc kubenswrapper[4783]: I1002 12:11:47.863444 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d4851f75-ec62-4979-b48f-ca679b3db80b-utilities\") pod \"community-operators-tmb69\" (UID: \"d4851f75-ec62-4979-b48f-ca679b3db80b\") " pod="openshift-marketplace/community-operators-tmb69" Oct 02 12:11:47 crc kubenswrapper[4783]: I1002 12:11:47.966057 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d4851f75-ec62-4979-b48f-ca679b3db80b-catalog-content\") pod \"community-operators-tmb69\" (UID: \"d4851f75-ec62-4979-b48f-ca679b3db80b\") " pod="openshift-marketplace/community-operators-tmb69" Oct 02 12:11:47 crc kubenswrapper[4783]: I1002 12:11:47.966190 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-swsz9\" (UniqueName: \"kubernetes.io/projected/d4851f75-ec62-4979-b48f-ca679b3db80b-kube-api-access-swsz9\") pod \"community-operators-tmb69\" (UID: \"d4851f75-ec62-4979-b48f-ca679b3db80b\") " pod="openshift-marketplace/community-operators-tmb69" Oct 02 12:11:47 crc kubenswrapper[4783]: I1002 12:11:47.966327 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d4851f75-ec62-4979-b48f-ca679b3db80b-utilities\") pod \"community-operators-tmb69\" (UID: \"d4851f75-ec62-4979-b48f-ca679b3db80b\") " pod="openshift-marketplace/community-operators-tmb69" Oct 02 12:11:47 crc kubenswrapper[4783]: I1002 12:11:47.966808 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d4851f75-ec62-4979-b48f-ca679b3db80b-catalog-content\") pod \"community-operators-tmb69\" (UID: \"d4851f75-ec62-4979-b48f-ca679b3db80b\") " pod="openshift-marketplace/community-operators-tmb69" Oct 02 12:11:47 crc kubenswrapper[4783]: I1002 12:11:47.966860 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d4851f75-ec62-4979-b48f-ca679b3db80b-utilities\") pod \"community-operators-tmb69\" (UID: \"d4851f75-ec62-4979-b48f-ca679b3db80b\") " pod="openshift-marketplace/community-operators-tmb69" Oct 02 12:11:47 crc kubenswrapper[4783]: I1002 12:11:47.995711 4783 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-swsz9\" (UniqueName: \"kubernetes.io/projected/d4851f75-ec62-4979-b48f-ca679b3db80b-kube-api-access-swsz9\") pod \"community-operators-tmb69\" (UID: \"d4851f75-ec62-4979-b48f-ca679b3db80b\") " pod="openshift-marketplace/community-operators-tmb69" Oct 02 12:11:48 crc kubenswrapper[4783]: I1002 12:11:48.085113 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tmb69" Oct 02 12:11:48 crc kubenswrapper[4783]: I1002 12:11:48.608921 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-tmb69"] Oct 02 12:11:48 crc kubenswrapper[4783]: I1002 12:11:48.782561 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tmb69" event={"ID":"d4851f75-ec62-4979-b48f-ca679b3db80b","Type":"ContainerStarted","Data":"f01434d2038b3d98edafedfb3246737e9e46095fb10fe5b63c5a16b080aeb6bc"} Oct 02 12:11:49 crc kubenswrapper[4783]: I1002 12:11:49.792728 4783 generic.go:334] "Generic (PLEG): container finished" podID="d4851f75-ec62-4979-b48f-ca679b3db80b" containerID="a7167d8b3b2a1f650d3177816c6265123ddef9a5ae11af37377b1f7528c37179" exitCode=0 Oct 02 12:11:49 crc kubenswrapper[4783]: I1002 12:11:49.792808 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tmb69" event={"ID":"d4851f75-ec62-4979-b48f-ca679b3db80b","Type":"ContainerDied","Data":"a7167d8b3b2a1f650d3177816c6265123ddef9a5ae11af37377b1f7528c37179"} Oct 02 12:11:49 crc kubenswrapper[4783]: I1002 12:11:49.796713 4783 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 02 12:11:50 crc kubenswrapper[4783]: I1002 12:11:50.803697 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tmb69" event={"ID":"d4851f75-ec62-4979-b48f-ca679b3db80b","Type":"ContainerStarted","Data":"9b3a6c2312a739e638b698e3b1c7de818c3f817d1182c1edcfd2befd7d5f1c24"} Oct 02 12:11:51 crc kubenswrapper[4783]: E1002 12:11:51.827192 4783 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd4851f75_ec62_4979_b48f_ca679b3db80b.slice/crio-conmon-9b3a6c2312a739e638b698e3b1c7de818c3f817d1182c1edcfd2befd7d5f1c24.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd4851f75_ec62_4979_b48f_ca679b3db80b.slice/crio-9b3a6c2312a739e638b698e3b1c7de818c3f817d1182c1edcfd2befd7d5f1c24.scope\": RecentStats: unable to find data in memory cache]" Oct 02 12:11:51 crc kubenswrapper[4783]: I1002 12:11:51.827667 4783 generic.go:334] "Generic (PLEG): container finished" podID="d4851f75-ec62-4979-b48f-ca679b3db80b" containerID="9b3a6c2312a739e638b698e3b1c7de818c3f817d1182c1edcfd2befd7d5f1c24" exitCode=0 Oct 02 12:11:51 crc kubenswrapper[4783]: I1002 12:11:51.827761 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tmb69" event={"ID":"d4851f75-ec62-4979-b48f-ca679b3db80b","Type":"ContainerDied","Data":"9b3a6c2312a739e638b698e3b1c7de818c3f817d1182c1edcfd2befd7d5f1c24"} Oct 02 12:11:52 crc kubenswrapper[4783]: I1002 12:11:52.839666 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tmb69" 
event={"ID":"d4851f75-ec62-4979-b48f-ca679b3db80b","Type":"ContainerStarted","Data":"14acd508cb3c4a46198c490351b427a7cbbef1fb84cb35f818adb8cb506158b5"} Oct 02 12:11:52 crc kubenswrapper[4783]: I1002 12:11:52.872144 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-tmb69" podStartSLOduration=3.361451363 podStartE2EDuration="5.872115964s" podCreationTimestamp="2025-10-02 12:11:47 +0000 UTC" firstStartedPulling="2025-10-02 12:11:49.794883067 +0000 UTC m=+4743.111077328" lastFinishedPulling="2025-10-02 12:11:52.305547668 +0000 UTC m=+4745.621741929" observedRunningTime="2025-10-02 12:11:52.859933384 +0000 UTC m=+4746.176127675" watchObservedRunningTime="2025-10-02 12:11:52.872115964 +0000 UTC m=+4746.188310265" Oct 02 12:11:58 crc kubenswrapper[4783]: I1002 12:11:58.086847 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-tmb69" Oct 02 12:11:58 crc kubenswrapper[4783]: I1002 12:11:58.087376 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-tmb69" Oct 02 12:11:58 crc kubenswrapper[4783]: I1002 12:11:58.165047 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-tmb69" Oct 02 12:11:58 crc kubenswrapper[4783]: I1002 12:11:58.949849 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-tmb69" Oct 02 12:11:59 crc kubenswrapper[4783]: I1002 12:11:59.007284 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-tmb69"] Oct 02 12:12:00 crc kubenswrapper[4783]: I1002 12:12:00.936941 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-tmb69" podUID="d4851f75-ec62-4979-b48f-ca679b3db80b" containerName="registry-server" containerID="cri-o://14acd508cb3c4a46198c490351b427a7cbbef1fb84cb35f818adb8cb506158b5" gracePeriod=2 Oct 02 12:12:01 crc kubenswrapper[4783]: I1002 12:12:01.392270 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-tmb69" Oct 02 12:12:01 crc kubenswrapper[4783]: I1002 12:12:01.441855 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d4851f75-ec62-4979-b48f-ca679b3db80b-catalog-content\") pod \"d4851f75-ec62-4979-b48f-ca679b3db80b\" (UID: \"d4851f75-ec62-4979-b48f-ca679b3db80b\") " Oct 02 12:12:01 crc kubenswrapper[4783]: I1002 12:12:01.441924 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d4851f75-ec62-4979-b48f-ca679b3db80b-utilities\") pod \"d4851f75-ec62-4979-b48f-ca679b3db80b\" (UID: \"d4851f75-ec62-4979-b48f-ca679b3db80b\") " Oct 02 12:12:01 crc kubenswrapper[4783]: I1002 12:12:01.442051 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-swsz9\" (UniqueName: \"kubernetes.io/projected/d4851f75-ec62-4979-b48f-ca679b3db80b-kube-api-access-swsz9\") pod \"d4851f75-ec62-4979-b48f-ca679b3db80b\" (UID: \"d4851f75-ec62-4979-b48f-ca679b3db80b\") " Oct 02 12:12:01 crc kubenswrapper[4783]: I1002 12:12:01.443142 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d4851f75-ec62-4979-b48f-ca679b3db80b-utilities" (OuterVolumeSpecName: "utilities") pod "d4851f75-ec62-4979-b48f-ca679b3db80b" (UID: "d4851f75-ec62-4979-b48f-ca679b3db80b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 12:12:01 crc kubenswrapper[4783]: I1002 12:12:01.468392 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d4851f75-ec62-4979-b48f-ca679b3db80b-kube-api-access-swsz9" (OuterVolumeSpecName: "kube-api-access-swsz9") pod "d4851f75-ec62-4979-b48f-ca679b3db80b" (UID: "d4851f75-ec62-4979-b48f-ca679b3db80b"). InnerVolumeSpecName "kube-api-access-swsz9". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 12:12:01 crc kubenswrapper[4783]: I1002 12:12:01.544283 4783 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d4851f75-ec62-4979-b48f-ca679b3db80b-utilities\") on node \"crc\" DevicePath \"\"" Oct 02 12:12:01 crc kubenswrapper[4783]: I1002 12:12:01.544314 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-swsz9\" (UniqueName: \"kubernetes.io/projected/d4851f75-ec62-4979-b48f-ca679b3db80b-kube-api-access-swsz9\") on node \"crc\" DevicePath \"\"" Oct 02 12:12:01 crc kubenswrapper[4783]: I1002 12:12:01.956792 4783 generic.go:334] "Generic (PLEG): container finished" podID="d4851f75-ec62-4979-b48f-ca679b3db80b" containerID="14acd508cb3c4a46198c490351b427a7cbbef1fb84cb35f818adb8cb506158b5" exitCode=0 Oct 02 12:12:01 crc kubenswrapper[4783]: I1002 12:12:01.956860 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-tmb69" Oct 02 12:12:01 crc kubenswrapper[4783]: I1002 12:12:01.956886 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tmb69" event={"ID":"d4851f75-ec62-4979-b48f-ca679b3db80b","Type":"ContainerDied","Data":"14acd508cb3c4a46198c490351b427a7cbbef1fb84cb35f818adb8cb506158b5"} Oct 02 12:12:01 crc kubenswrapper[4783]: I1002 12:12:01.958274 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tmb69" event={"ID":"d4851f75-ec62-4979-b48f-ca679b3db80b","Type":"ContainerDied","Data":"f01434d2038b3d98edafedfb3246737e9e46095fb10fe5b63c5a16b080aeb6bc"} Oct 02 12:12:01 crc kubenswrapper[4783]: I1002 12:12:01.958354 4783 scope.go:117] "RemoveContainer" containerID="14acd508cb3c4a46198c490351b427a7cbbef1fb84cb35f818adb8cb506158b5" Oct 02 12:12:01 crc kubenswrapper[4783]: I1002 12:12:01.984314 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d4851f75-ec62-4979-b48f-ca679b3db80b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d4851f75-ec62-4979-b48f-ca679b3db80b" (UID: "d4851f75-ec62-4979-b48f-ca679b3db80b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 12:12:01 crc kubenswrapper[4783]: I1002 12:12:01.986859 4783 scope.go:117] "RemoveContainer" containerID="9b3a6c2312a739e638b698e3b1c7de818c3f817d1182c1edcfd2befd7d5f1c24" Oct 02 12:12:02 crc kubenswrapper[4783]: I1002 12:12:02.022641 4783 scope.go:117] "RemoveContainer" containerID="a7167d8b3b2a1f650d3177816c6265123ddef9a5ae11af37377b1f7528c37179" Oct 02 12:12:02 crc kubenswrapper[4783]: I1002 12:12:02.056076 4783 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d4851f75-ec62-4979-b48f-ca679b3db80b-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 02 12:12:02 crc kubenswrapper[4783]: I1002 12:12:02.086316 4783 scope.go:117] "RemoveContainer" containerID="14acd508cb3c4a46198c490351b427a7cbbef1fb84cb35f818adb8cb506158b5" Oct 02 12:12:02 crc kubenswrapper[4783]: E1002 12:12:02.086810 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"14acd508cb3c4a46198c490351b427a7cbbef1fb84cb35f818adb8cb506158b5\": container with ID starting with 14acd508cb3c4a46198c490351b427a7cbbef1fb84cb35f818adb8cb506158b5 not found: ID does not exist" containerID="14acd508cb3c4a46198c490351b427a7cbbef1fb84cb35f818adb8cb506158b5" Oct 02 12:12:02 crc kubenswrapper[4783]: I1002 12:12:02.086842 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"14acd508cb3c4a46198c490351b427a7cbbef1fb84cb35f818adb8cb506158b5"} err="failed to get container status \"14acd508cb3c4a46198c490351b427a7cbbef1fb84cb35f818adb8cb506158b5\": rpc error: code = NotFound desc = could not find container \"14acd508cb3c4a46198c490351b427a7cbbef1fb84cb35f818adb8cb506158b5\": container with ID starting with 14acd508cb3c4a46198c490351b427a7cbbef1fb84cb35f818adb8cb506158b5 not found: ID does not exist" Oct 02 12:12:02 crc kubenswrapper[4783]: I1002 12:12:02.086864 4783 scope.go:117] "RemoveContainer" containerID="9b3a6c2312a739e638b698e3b1c7de818c3f817d1182c1edcfd2befd7d5f1c24" Oct 02 12:12:02 crc kubenswrapper[4783]: E1002 12:12:02.087064 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = 
could not find container \"9b3a6c2312a739e638b698e3b1c7de818c3f817d1182c1edcfd2befd7d5f1c24\": container with ID starting with 9b3a6c2312a739e638b698e3b1c7de818c3f817d1182c1edcfd2befd7d5f1c24 not found: ID does not exist" containerID="9b3a6c2312a739e638b698e3b1c7de818c3f817d1182c1edcfd2befd7d5f1c24" Oct 02 12:12:02 crc kubenswrapper[4783]: I1002 12:12:02.087082 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9b3a6c2312a739e638b698e3b1c7de818c3f817d1182c1edcfd2befd7d5f1c24"} err="failed to get container status \"9b3a6c2312a739e638b698e3b1c7de818c3f817d1182c1edcfd2befd7d5f1c24\": rpc error: code = NotFound desc = could not find container \"9b3a6c2312a739e638b698e3b1c7de818c3f817d1182c1edcfd2befd7d5f1c24\": container with ID starting with 9b3a6c2312a739e638b698e3b1c7de818c3f817d1182c1edcfd2befd7d5f1c24 not found: ID does not exist" Oct 02 12:12:02 crc kubenswrapper[4783]: I1002 12:12:02.087096 4783 scope.go:117] "RemoveContainer" containerID="a7167d8b3b2a1f650d3177816c6265123ddef9a5ae11af37377b1f7528c37179" Oct 02 12:12:02 crc kubenswrapper[4783]: E1002 12:12:02.087275 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a7167d8b3b2a1f650d3177816c6265123ddef9a5ae11af37377b1f7528c37179\": container with ID starting with a7167d8b3b2a1f650d3177816c6265123ddef9a5ae11af37377b1f7528c37179 not found: ID does not exist" containerID="a7167d8b3b2a1f650d3177816c6265123ddef9a5ae11af37377b1f7528c37179" Oct 02 12:12:02 crc kubenswrapper[4783]: I1002 12:12:02.087296 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a7167d8b3b2a1f650d3177816c6265123ddef9a5ae11af37377b1f7528c37179"} err="failed to get container status \"a7167d8b3b2a1f650d3177816c6265123ddef9a5ae11af37377b1f7528c37179\": rpc error: code = NotFound desc = could not find container \"a7167d8b3b2a1f650d3177816c6265123ddef9a5ae11af37377b1f7528c37179\": container with ID starting with a7167d8b3b2a1f650d3177816c6265123ddef9a5ae11af37377b1f7528c37179 not found: ID does not exist" Oct 02 12:12:02 crc kubenswrapper[4783]: I1002 12:12:02.300883 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-tmb69"] Oct 02 12:12:02 crc kubenswrapper[4783]: I1002 12:12:02.321762 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-tmb69"] Oct 02 12:12:03 crc kubenswrapper[4783]: I1002 12:12:03.561406 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d4851f75-ec62-4979-b48f-ca679b3db80b" path="/var/lib/kubelet/pods/d4851f75-ec62-4979-b48f-ca679b3db80b/volumes" Oct 02 12:12:21 crc kubenswrapper[4783]: I1002 12:12:21.515200 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 02 12:12:21 crc kubenswrapper[4783]: I1002 12:12:21.515832 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 02 12:12:51 crc kubenswrapper[4783]: I1002 12:12:51.513890 4783 
Oct 02 12:12:51 crc kubenswrapper[4783]: I1002 12:12:51.514401 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Oct 02 12:13:04 crc kubenswrapper[4783]: I1002 12:13:04.100568 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-mwwmz"]
Oct 02 12:13:04 crc kubenswrapper[4783]: E1002 12:13:04.101388 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4851f75-ec62-4979-b48f-ca679b3db80b" containerName="extract-content"
Oct 02 12:13:04 crc kubenswrapper[4783]: I1002 12:13:04.101400 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4851f75-ec62-4979-b48f-ca679b3db80b" containerName="extract-content"
Oct 02 12:13:04 crc kubenswrapper[4783]: E1002 12:13:04.101439 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4851f75-ec62-4979-b48f-ca679b3db80b" containerName="extract-utilities"
Oct 02 12:13:04 crc kubenswrapper[4783]: I1002 12:13:04.101445 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4851f75-ec62-4979-b48f-ca679b3db80b" containerName="extract-utilities"
Oct 02 12:13:04 crc kubenswrapper[4783]: E1002 12:13:04.101457 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4851f75-ec62-4979-b48f-ca679b3db80b" containerName="registry-server"
Oct 02 12:13:04 crc kubenswrapper[4783]: I1002 12:13:04.101463 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4851f75-ec62-4979-b48f-ca679b3db80b" containerName="registry-server"
Oct 02 12:13:04 crc kubenswrapper[4783]: I1002 12:13:04.101639 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="d4851f75-ec62-4979-b48f-ca679b3db80b" containerName="registry-server"
Oct 02 12:13:04 crc kubenswrapper[4783]: I1002 12:13:04.102978 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mwwmz"
Oct 02 12:13:04 crc kubenswrapper[4783]: I1002 12:13:04.118586 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mwwmz"]
Oct 02 12:13:04 crc kubenswrapper[4783]: I1002 12:13:04.249460 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vsqlg\" (UniqueName: \"kubernetes.io/projected/c664e350-391d-4b89-b582-0befb56e4e4d-kube-api-access-vsqlg\") pod \"redhat-marketplace-mwwmz\" (UID: \"c664e350-391d-4b89-b582-0befb56e4e4d\") " pod="openshift-marketplace/redhat-marketplace-mwwmz"
Oct 02 12:13:04 crc kubenswrapper[4783]: I1002 12:13:04.249522 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c664e350-391d-4b89-b582-0befb56e4e4d-utilities\") pod \"redhat-marketplace-mwwmz\" (UID: \"c664e350-391d-4b89-b582-0befb56e4e4d\") " pod="openshift-marketplace/redhat-marketplace-mwwmz"
Oct 02 12:13:04 crc kubenswrapper[4783]: I1002 12:13:04.249727 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c664e350-391d-4b89-b582-0befb56e4e4d-catalog-content\") pod \"redhat-marketplace-mwwmz\" (UID: \"c664e350-391d-4b89-b582-0befb56e4e4d\") " pod="openshift-marketplace/redhat-marketplace-mwwmz"
Oct 02 12:13:04 crc kubenswrapper[4783]: I1002 12:13:04.351873 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vsqlg\" (UniqueName: \"kubernetes.io/projected/c664e350-391d-4b89-b582-0befb56e4e4d-kube-api-access-vsqlg\") pod \"redhat-marketplace-mwwmz\" (UID: \"c664e350-391d-4b89-b582-0befb56e4e4d\") " pod="openshift-marketplace/redhat-marketplace-mwwmz"
Oct 02 12:13:04 crc kubenswrapper[4783]: I1002 12:13:04.351938 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c664e350-391d-4b89-b582-0befb56e4e4d-utilities\") pod \"redhat-marketplace-mwwmz\" (UID: \"c664e350-391d-4b89-b582-0befb56e4e4d\") " pod="openshift-marketplace/redhat-marketplace-mwwmz"
Oct 02 12:13:04 crc kubenswrapper[4783]: I1002 12:13:04.351975 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c664e350-391d-4b89-b582-0befb56e4e4d-catalog-content\") pod \"redhat-marketplace-mwwmz\" (UID: \"c664e350-391d-4b89-b582-0befb56e4e4d\") " pod="openshift-marketplace/redhat-marketplace-mwwmz"
Oct 02 12:13:04 crc kubenswrapper[4783]: I1002 12:13:04.352502 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c664e350-391d-4b89-b582-0befb56e4e4d-catalog-content\") pod \"redhat-marketplace-mwwmz\" (UID: \"c664e350-391d-4b89-b582-0befb56e4e4d\") " pod="openshift-marketplace/redhat-marketplace-mwwmz"
Oct 02 12:13:04 crc kubenswrapper[4783]: I1002 12:13:04.352530 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c664e350-391d-4b89-b582-0befb56e4e4d-utilities\") pod \"redhat-marketplace-mwwmz\" (UID: \"c664e350-391d-4b89-b582-0befb56e4e4d\") " pod="openshift-marketplace/redhat-marketplace-mwwmz"
Oct 02 12:13:04 crc kubenswrapper[4783]: I1002 12:13:04.371990 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vsqlg\" (UniqueName: \"kubernetes.io/projected/c664e350-391d-4b89-b582-0befb56e4e4d-kube-api-access-vsqlg\") pod \"redhat-marketplace-mwwmz\" (UID: \"c664e350-391d-4b89-b582-0befb56e4e4d\") " pod="openshift-marketplace/redhat-marketplace-mwwmz"
Oct 02 12:13:04 crc kubenswrapper[4783]: I1002 12:13:04.448668 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mwwmz"
Oct 02 12:13:04 crc kubenswrapper[4783]: I1002 12:13:04.987818 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-mwwmz"]
Oct 02 12:13:05 crc kubenswrapper[4783]: I1002 12:13:05.595080 4783 generic.go:334] "Generic (PLEG): container finished" podID="c664e350-391d-4b89-b582-0befb56e4e4d" containerID="aec18b81614a2a579f38975fd5a428ba167c396fa95b01c0f02c991ae4344b19" exitCode=0
Oct 02 12:13:05 crc kubenswrapper[4783]: I1002 12:13:05.595171 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mwwmz" event={"ID":"c664e350-391d-4b89-b582-0befb56e4e4d","Type":"ContainerDied","Data":"aec18b81614a2a579f38975fd5a428ba167c396fa95b01c0f02c991ae4344b19"}
Oct 02 12:13:05 crc kubenswrapper[4783]: I1002 12:13:05.595522 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mwwmz" event={"ID":"c664e350-391d-4b89-b582-0befb56e4e4d","Type":"ContainerStarted","Data":"4a20b340e269502bb5686847452cba75c18b0479b6d99f26bfffcf874a62436c"}
Oct 02 12:13:07 crc kubenswrapper[4783]: I1002 12:13:07.621931 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mwwmz" event={"ID":"c664e350-391d-4b89-b582-0befb56e4e4d","Type":"ContainerStarted","Data":"a2c4ee2c7c0c3b59a3287900aefe384b488990292bb38cfdeba6754b857d89a7"}
Oct 02 12:13:08 crc kubenswrapper[4783]: I1002 12:13:08.633982 4783 generic.go:334] "Generic (PLEG): container finished" podID="c664e350-391d-4b89-b582-0befb56e4e4d" containerID="a2c4ee2c7c0c3b59a3287900aefe384b488990292bb38cfdeba6754b857d89a7" exitCode=0
Oct 02 12:13:08 crc kubenswrapper[4783]: I1002 12:13:08.634105 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mwwmz" event={"ID":"c664e350-391d-4b89-b582-0befb56e4e4d","Type":"ContainerDied","Data":"a2c4ee2c7c0c3b59a3287900aefe384b488990292bb38cfdeba6754b857d89a7"}
Oct 02 12:13:09 crc kubenswrapper[4783]: I1002 12:13:09.644201 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mwwmz" event={"ID":"c664e350-391d-4b89-b582-0befb56e4e4d","Type":"ContainerStarted","Data":"4ceb13fe67e0ba34ec6cefcd3a76ebab6207927aa20a780eecd15bb5c6638d69"}
Oct 02 12:13:09 crc kubenswrapper[4783]: I1002 12:13:09.665438 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-mwwmz" podStartSLOduration=2.086148633 podStartE2EDuration="5.665403145s" podCreationTimestamp="2025-10-02 12:13:04 +0000 UTC" firstStartedPulling="2025-10-02 12:13:05.598699103 +0000 UTC m=+4818.914893404" lastFinishedPulling="2025-10-02 12:13:09.177953655 +0000 UTC m=+4822.494147916" observedRunningTime="2025-10-02 12:13:09.658313432 +0000 UTC m=+4822.974507693" watchObservedRunningTime="2025-10-02 12:13:09.665403145 +0000 UTC m=+4822.981597406"
Oct 02 12:13:14 crc kubenswrapper[4783]: I1002 12:13:14.450163 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-mwwmz"
pod="openshift-marketplace/redhat-marketplace-mwwmz" Oct 02 12:13:14 crc kubenswrapper[4783]: I1002 12:13:14.450774 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-mwwmz" Oct 02 12:13:14 crc kubenswrapper[4783]: I1002 12:13:14.498138 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-mwwmz" Oct 02 12:13:14 crc kubenswrapper[4783]: I1002 12:13:14.743186 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-mwwmz" Oct 02 12:13:14 crc kubenswrapper[4783]: I1002 12:13:14.864213 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-mwwmz"] Oct 02 12:13:16 crc kubenswrapper[4783]: I1002 12:13:16.720482 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-mwwmz" podUID="c664e350-391d-4b89-b582-0befb56e4e4d" containerName="registry-server" containerID="cri-o://4ceb13fe67e0ba34ec6cefcd3a76ebab6207927aa20a780eecd15bb5c6638d69" gracePeriod=2 Oct 02 12:13:17 crc kubenswrapper[4783]: I1002 12:13:17.218611 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mwwmz" Oct 02 12:13:17 crc kubenswrapper[4783]: I1002 12:13:17.341974 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c664e350-391d-4b89-b582-0befb56e4e4d-catalog-content\") pod \"c664e350-391d-4b89-b582-0befb56e4e4d\" (UID: \"c664e350-391d-4b89-b582-0befb56e4e4d\") " Oct 02 12:13:17 crc kubenswrapper[4783]: I1002 12:13:17.342065 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vsqlg\" (UniqueName: \"kubernetes.io/projected/c664e350-391d-4b89-b582-0befb56e4e4d-kube-api-access-vsqlg\") pod \"c664e350-391d-4b89-b582-0befb56e4e4d\" (UID: \"c664e350-391d-4b89-b582-0befb56e4e4d\") " Oct 02 12:13:17 crc kubenswrapper[4783]: I1002 12:13:17.342152 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c664e350-391d-4b89-b582-0befb56e4e4d-utilities\") pod \"c664e350-391d-4b89-b582-0befb56e4e4d\" (UID: \"c664e350-391d-4b89-b582-0befb56e4e4d\") " Oct 02 12:13:17 crc kubenswrapper[4783]: I1002 12:13:17.342982 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c664e350-391d-4b89-b582-0befb56e4e4d-utilities" (OuterVolumeSpecName: "utilities") pod "c664e350-391d-4b89-b582-0befb56e4e4d" (UID: "c664e350-391d-4b89-b582-0befb56e4e4d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 12:13:17 crc kubenswrapper[4783]: I1002 12:13:17.360665 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c664e350-391d-4b89-b582-0befb56e4e4d-kube-api-access-vsqlg" (OuterVolumeSpecName: "kube-api-access-vsqlg") pod "c664e350-391d-4b89-b582-0befb56e4e4d" (UID: "c664e350-391d-4b89-b582-0befb56e4e4d"). InnerVolumeSpecName "kube-api-access-vsqlg". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 12:13:17 crc kubenswrapper[4783]: I1002 12:13:17.363017 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c664e350-391d-4b89-b582-0befb56e4e4d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c664e350-391d-4b89-b582-0befb56e4e4d" (UID: "c664e350-391d-4b89-b582-0befb56e4e4d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 12:13:17 crc kubenswrapper[4783]: I1002 12:13:17.444562 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vsqlg\" (UniqueName: \"kubernetes.io/projected/c664e350-391d-4b89-b582-0befb56e4e4d-kube-api-access-vsqlg\") on node \"crc\" DevicePath \"\"" Oct 02 12:13:17 crc kubenswrapper[4783]: I1002 12:13:17.444719 4783 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c664e350-391d-4b89-b582-0befb56e4e4d-utilities\") on node \"crc\" DevicePath \"\"" Oct 02 12:13:17 crc kubenswrapper[4783]: I1002 12:13:17.444775 4783 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c664e350-391d-4b89-b582-0befb56e4e4d-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 02 12:13:17 crc kubenswrapper[4783]: I1002 12:13:17.733657 4783 generic.go:334] "Generic (PLEG): container finished" podID="c664e350-391d-4b89-b582-0befb56e4e4d" containerID="4ceb13fe67e0ba34ec6cefcd3a76ebab6207927aa20a780eecd15bb5c6638d69" exitCode=0 Oct 02 12:13:17 crc kubenswrapper[4783]: I1002 12:13:17.733767 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-mwwmz" Oct 02 12:13:17 crc kubenswrapper[4783]: I1002 12:13:17.733764 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mwwmz" event={"ID":"c664e350-391d-4b89-b582-0befb56e4e4d","Type":"ContainerDied","Data":"4ceb13fe67e0ba34ec6cefcd3a76ebab6207927aa20a780eecd15bb5c6638d69"} Oct 02 12:13:17 crc kubenswrapper[4783]: I1002 12:13:17.735271 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-mwwmz" event={"ID":"c664e350-391d-4b89-b582-0befb56e4e4d","Type":"ContainerDied","Data":"4a20b340e269502bb5686847452cba75c18b0479b6d99f26bfffcf874a62436c"} Oct 02 12:13:17 crc kubenswrapper[4783]: I1002 12:13:17.735302 4783 scope.go:117] "RemoveContainer" containerID="4ceb13fe67e0ba34ec6cefcd3a76ebab6207927aa20a780eecd15bb5c6638d69" Oct 02 12:13:17 crc kubenswrapper[4783]: I1002 12:13:17.770086 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-mwwmz"] Oct 02 12:13:17 crc kubenswrapper[4783]: I1002 12:13:17.771092 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-mwwmz"] Oct 02 12:13:17 crc kubenswrapper[4783]: I1002 12:13:17.771736 4783 scope.go:117] "RemoveContainer" containerID="a2c4ee2c7c0c3b59a3287900aefe384b488990292bb38cfdeba6754b857d89a7" Oct 02 12:13:17 crc kubenswrapper[4783]: I1002 12:13:17.795124 4783 scope.go:117] "RemoveContainer" containerID="aec18b81614a2a579f38975fd5a428ba167c396fa95b01c0f02c991ae4344b19" Oct 02 12:13:17 crc kubenswrapper[4783]: I1002 12:13:17.838945 4783 scope.go:117] "RemoveContainer" containerID="4ceb13fe67e0ba34ec6cefcd3a76ebab6207927aa20a780eecd15bb5c6638d69" Oct 02 12:13:17 crc kubenswrapper[4783]: E1002 12:13:17.839653 4783 log.go:32] 
"ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4ceb13fe67e0ba34ec6cefcd3a76ebab6207927aa20a780eecd15bb5c6638d69\": container with ID starting with 4ceb13fe67e0ba34ec6cefcd3a76ebab6207927aa20a780eecd15bb5c6638d69 not found: ID does not exist" containerID="4ceb13fe67e0ba34ec6cefcd3a76ebab6207927aa20a780eecd15bb5c6638d69" Oct 02 12:13:17 crc kubenswrapper[4783]: I1002 12:13:17.839706 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4ceb13fe67e0ba34ec6cefcd3a76ebab6207927aa20a780eecd15bb5c6638d69"} err="failed to get container status \"4ceb13fe67e0ba34ec6cefcd3a76ebab6207927aa20a780eecd15bb5c6638d69\": rpc error: code = NotFound desc = could not find container \"4ceb13fe67e0ba34ec6cefcd3a76ebab6207927aa20a780eecd15bb5c6638d69\": container with ID starting with 4ceb13fe67e0ba34ec6cefcd3a76ebab6207927aa20a780eecd15bb5c6638d69 not found: ID does not exist" Oct 02 12:13:17 crc kubenswrapper[4783]: I1002 12:13:17.839738 4783 scope.go:117] "RemoveContainer" containerID="a2c4ee2c7c0c3b59a3287900aefe384b488990292bb38cfdeba6754b857d89a7" Oct 02 12:13:17 crc kubenswrapper[4783]: E1002 12:13:17.840129 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a2c4ee2c7c0c3b59a3287900aefe384b488990292bb38cfdeba6754b857d89a7\": container with ID starting with a2c4ee2c7c0c3b59a3287900aefe384b488990292bb38cfdeba6754b857d89a7 not found: ID does not exist" containerID="a2c4ee2c7c0c3b59a3287900aefe384b488990292bb38cfdeba6754b857d89a7" Oct 02 12:13:17 crc kubenswrapper[4783]: I1002 12:13:17.840254 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a2c4ee2c7c0c3b59a3287900aefe384b488990292bb38cfdeba6754b857d89a7"} err="failed to get container status \"a2c4ee2c7c0c3b59a3287900aefe384b488990292bb38cfdeba6754b857d89a7\": rpc error: code = NotFound desc = could not find container \"a2c4ee2c7c0c3b59a3287900aefe384b488990292bb38cfdeba6754b857d89a7\": container with ID starting with a2c4ee2c7c0c3b59a3287900aefe384b488990292bb38cfdeba6754b857d89a7 not found: ID does not exist" Oct 02 12:13:17 crc kubenswrapper[4783]: I1002 12:13:17.840352 4783 scope.go:117] "RemoveContainer" containerID="aec18b81614a2a579f38975fd5a428ba167c396fa95b01c0f02c991ae4344b19" Oct 02 12:13:17 crc kubenswrapper[4783]: E1002 12:13:17.840854 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"aec18b81614a2a579f38975fd5a428ba167c396fa95b01c0f02c991ae4344b19\": container with ID starting with aec18b81614a2a579f38975fd5a428ba167c396fa95b01c0f02c991ae4344b19 not found: ID does not exist" containerID="aec18b81614a2a579f38975fd5a428ba167c396fa95b01c0f02c991ae4344b19" Oct 02 12:13:17 crc kubenswrapper[4783]: I1002 12:13:17.840913 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"aec18b81614a2a579f38975fd5a428ba167c396fa95b01c0f02c991ae4344b19"} err="failed to get container status \"aec18b81614a2a579f38975fd5a428ba167c396fa95b01c0f02c991ae4344b19\": rpc error: code = NotFound desc = could not find container \"aec18b81614a2a579f38975fd5a428ba167c396fa95b01c0f02c991ae4344b19\": container with ID starting with aec18b81614a2a579f38975fd5a428ba167c396fa95b01c0f02c991ae4344b19 not found: ID does not exist" Oct 02 12:13:19 crc kubenswrapper[4783]: I1002 12:13:19.573931 4783 kubelet_volumes.go:163] "Cleaned 
up orphaned pod volumes dir" podUID="c664e350-391d-4b89-b582-0befb56e4e4d" path="/var/lib/kubelet/pods/c664e350-391d-4b89-b582-0befb56e4e4d/volumes" Oct 02 12:13:21 crc kubenswrapper[4783]: I1002 12:13:21.513493 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 02 12:13:21 crc kubenswrapper[4783]: I1002 12:13:21.514015 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 02 12:13:21 crc kubenswrapper[4783]: I1002 12:13:21.514103 4783 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" Oct 02 12:13:21 crc kubenswrapper[4783]: I1002 12:13:21.515345 4783 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"31c034a19e50d094a581a0f0df605c3e00aa8dd41f6fc7b1675a23f1b1bd8cff"} pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 02 12:13:21 crc kubenswrapper[4783]: I1002 12:13:21.515513 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" containerID="cri-o://31c034a19e50d094a581a0f0df605c3e00aa8dd41f6fc7b1675a23f1b1bd8cff" gracePeriod=600 Oct 02 12:13:21 crc kubenswrapper[4783]: I1002 12:13:21.786925 4783 generic.go:334] "Generic (PLEG): container finished" podID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerID="31c034a19e50d094a581a0f0df605c3e00aa8dd41f6fc7b1675a23f1b1bd8cff" exitCode=0 Oct 02 12:13:21 crc kubenswrapper[4783]: I1002 12:13:21.786973 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" event={"ID":"3288cc82-59a8-408e-8b0e-b5255882b4fb","Type":"ContainerDied","Data":"31c034a19e50d094a581a0f0df605c3e00aa8dd41f6fc7b1675a23f1b1bd8cff"} Oct 02 12:13:21 crc kubenswrapper[4783]: I1002 12:13:21.787009 4783 scope.go:117] "RemoveContainer" containerID="4afe7d365a301e5fc4251ae47070122472d46a4f9f15c46eb823bb99df9853ce" Oct 02 12:13:22 crc kubenswrapper[4783]: I1002 12:13:22.801525 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" event={"ID":"3288cc82-59a8-408e-8b0e-b5255882b4fb","Type":"ContainerStarted","Data":"e3e4225ed328e5591d066522ee52a27735fed7d1b5cd0007ed03d88c986c058b"} Oct 02 12:15:00 crc kubenswrapper[4783]: I1002 12:15:00.148908 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29323455-ccbsl"] Oct 02 12:15:00 crc kubenswrapper[4783]: E1002 12:15:00.149960 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c664e350-391d-4b89-b582-0befb56e4e4d" containerName="extract-utilities" Oct 02 12:15:00 crc kubenswrapper[4783]: I1002 12:15:00.149977 4783 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="c664e350-391d-4b89-b582-0befb56e4e4d" containerName="extract-utilities" Oct 02 12:15:00 crc kubenswrapper[4783]: E1002 12:15:00.150003 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c664e350-391d-4b89-b582-0befb56e4e4d" containerName="extract-content" Oct 02 12:15:00 crc kubenswrapper[4783]: I1002 12:15:00.150013 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="c664e350-391d-4b89-b582-0befb56e4e4d" containerName="extract-content" Oct 02 12:15:00 crc kubenswrapper[4783]: E1002 12:15:00.150025 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c664e350-391d-4b89-b582-0befb56e4e4d" containerName="registry-server" Oct 02 12:15:00 crc kubenswrapper[4783]: I1002 12:15:00.150033 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="c664e350-391d-4b89-b582-0befb56e4e4d" containerName="registry-server" Oct 02 12:15:00 crc kubenswrapper[4783]: I1002 12:15:00.150258 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="c664e350-391d-4b89-b582-0befb56e4e4d" containerName="registry-server" Oct 02 12:15:00 crc kubenswrapper[4783]: I1002 12:15:00.151169 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29323455-ccbsl" Oct 02 12:15:00 crc kubenswrapper[4783]: I1002 12:15:00.153052 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Oct 02 12:15:00 crc kubenswrapper[4783]: I1002 12:15:00.158696 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Oct 02 12:15:00 crc kubenswrapper[4783]: I1002 12:15:00.159321 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29323455-ccbsl"] Oct 02 12:15:00 crc kubenswrapper[4783]: I1002 12:15:00.235486 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/52565495-6a10-4185-b4d4-e303d9d7e623-config-volume\") pod \"collect-profiles-29323455-ccbsl\" (UID: \"52565495-6a10-4185-b4d4-e303d9d7e623\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323455-ccbsl" Oct 02 12:15:00 crc kubenswrapper[4783]: I1002 12:15:00.235643 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-px2ff\" (UniqueName: \"kubernetes.io/projected/52565495-6a10-4185-b4d4-e303d9d7e623-kube-api-access-px2ff\") pod \"collect-profiles-29323455-ccbsl\" (UID: \"52565495-6a10-4185-b4d4-e303d9d7e623\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323455-ccbsl" Oct 02 12:15:00 crc kubenswrapper[4783]: I1002 12:15:00.235724 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/52565495-6a10-4185-b4d4-e303d9d7e623-secret-volume\") pod \"collect-profiles-29323455-ccbsl\" (UID: \"52565495-6a10-4185-b4d4-e303d9d7e623\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323455-ccbsl" Oct 02 12:15:00 crc kubenswrapper[4783]: I1002 12:15:00.337506 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/52565495-6a10-4185-b4d4-e303d9d7e623-config-volume\") pod \"collect-profiles-29323455-ccbsl\" (UID: 
\"52565495-6a10-4185-b4d4-e303d9d7e623\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323455-ccbsl" Oct 02 12:15:00 crc kubenswrapper[4783]: I1002 12:15:00.337659 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-px2ff\" (UniqueName: \"kubernetes.io/projected/52565495-6a10-4185-b4d4-e303d9d7e623-kube-api-access-px2ff\") pod \"collect-profiles-29323455-ccbsl\" (UID: \"52565495-6a10-4185-b4d4-e303d9d7e623\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323455-ccbsl" Oct 02 12:15:00 crc kubenswrapper[4783]: I1002 12:15:00.337720 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/52565495-6a10-4185-b4d4-e303d9d7e623-secret-volume\") pod \"collect-profiles-29323455-ccbsl\" (UID: \"52565495-6a10-4185-b4d4-e303d9d7e623\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323455-ccbsl" Oct 02 12:15:00 crc kubenswrapper[4783]: I1002 12:15:00.338510 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/52565495-6a10-4185-b4d4-e303d9d7e623-config-volume\") pod \"collect-profiles-29323455-ccbsl\" (UID: \"52565495-6a10-4185-b4d4-e303d9d7e623\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323455-ccbsl" Oct 02 12:15:00 crc kubenswrapper[4783]: I1002 12:15:00.350175 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/52565495-6a10-4185-b4d4-e303d9d7e623-secret-volume\") pod \"collect-profiles-29323455-ccbsl\" (UID: \"52565495-6a10-4185-b4d4-e303d9d7e623\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323455-ccbsl" Oct 02 12:15:00 crc kubenswrapper[4783]: I1002 12:15:00.354213 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-px2ff\" (UniqueName: \"kubernetes.io/projected/52565495-6a10-4185-b4d4-e303d9d7e623-kube-api-access-px2ff\") pod \"collect-profiles-29323455-ccbsl\" (UID: \"52565495-6a10-4185-b4d4-e303d9d7e623\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29323455-ccbsl" Oct 02 12:15:00 crc kubenswrapper[4783]: I1002 12:15:00.482066 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29323455-ccbsl" Oct 02 12:15:00 crc kubenswrapper[4783]: I1002 12:15:00.935727 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29323455-ccbsl"] Oct 02 12:15:01 crc kubenswrapper[4783]: I1002 12:15:01.771780 4783 generic.go:334] "Generic (PLEG): container finished" podID="52565495-6a10-4185-b4d4-e303d9d7e623" containerID="ee5cebc54ecfbfea462c52c3d5170db325f763b37d84cd2b3afde4bc53e5d3d1" exitCode=0 Oct 02 12:15:01 crc kubenswrapper[4783]: I1002 12:15:01.771831 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29323455-ccbsl" event={"ID":"52565495-6a10-4185-b4d4-e303d9d7e623","Type":"ContainerDied","Data":"ee5cebc54ecfbfea462c52c3d5170db325f763b37d84cd2b3afde4bc53e5d3d1"} Oct 02 12:15:01 crc kubenswrapper[4783]: I1002 12:15:01.772089 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29323455-ccbsl" event={"ID":"52565495-6a10-4185-b4d4-e303d9d7e623","Type":"ContainerStarted","Data":"6c6b97b81d044bbd7264db89f40d5457b6369c5c51b64afb271cd8b8397b759e"} Oct 02 12:15:03 crc kubenswrapper[4783]: I1002 12:15:03.072797 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29323455-ccbsl" Oct 02 12:15:03 crc kubenswrapper[4783]: I1002 12:15:03.195088 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/52565495-6a10-4185-b4d4-e303d9d7e623-secret-volume\") pod \"52565495-6a10-4185-b4d4-e303d9d7e623\" (UID: \"52565495-6a10-4185-b4d4-e303d9d7e623\") " Oct 02 12:15:03 crc kubenswrapper[4783]: I1002 12:15:03.195163 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-px2ff\" (UniqueName: \"kubernetes.io/projected/52565495-6a10-4185-b4d4-e303d9d7e623-kube-api-access-px2ff\") pod \"52565495-6a10-4185-b4d4-e303d9d7e623\" (UID: \"52565495-6a10-4185-b4d4-e303d9d7e623\") " Oct 02 12:15:03 crc kubenswrapper[4783]: I1002 12:15:03.195253 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/52565495-6a10-4185-b4d4-e303d9d7e623-config-volume\") pod \"52565495-6a10-4185-b4d4-e303d9d7e623\" (UID: \"52565495-6a10-4185-b4d4-e303d9d7e623\") " Oct 02 12:15:03 crc kubenswrapper[4783]: I1002 12:15:03.196245 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/52565495-6a10-4185-b4d4-e303d9d7e623-config-volume" (OuterVolumeSpecName: "config-volume") pod "52565495-6a10-4185-b4d4-e303d9d7e623" (UID: "52565495-6a10-4185-b4d4-e303d9d7e623"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Oct 02 12:15:03 crc kubenswrapper[4783]: I1002 12:15:03.208566 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/52565495-6a10-4185-b4d4-e303d9d7e623-kube-api-access-px2ff" (OuterVolumeSpecName: "kube-api-access-px2ff") pod "52565495-6a10-4185-b4d4-e303d9d7e623" (UID: "52565495-6a10-4185-b4d4-e303d9d7e623"). InnerVolumeSpecName "kube-api-access-px2ff". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 12:15:03 crc kubenswrapper[4783]: I1002 12:15:03.209590 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/52565495-6a10-4185-b4d4-e303d9d7e623-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "52565495-6a10-4185-b4d4-e303d9d7e623" (UID: "52565495-6a10-4185-b4d4-e303d9d7e623"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Oct 02 12:15:03 crc kubenswrapper[4783]: I1002 12:15:03.297017 4783 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/52565495-6a10-4185-b4d4-e303d9d7e623-config-volume\") on node \"crc\" DevicePath \"\"" Oct 02 12:15:03 crc kubenswrapper[4783]: I1002 12:15:03.297064 4783 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/52565495-6a10-4185-b4d4-e303d9d7e623-secret-volume\") on node \"crc\" DevicePath \"\"" Oct 02 12:15:03 crc kubenswrapper[4783]: I1002 12:15:03.297073 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-px2ff\" (UniqueName: \"kubernetes.io/projected/52565495-6a10-4185-b4d4-e303d9d7e623-kube-api-access-px2ff\") on node \"crc\" DevicePath \"\"" Oct 02 12:15:03 crc kubenswrapper[4783]: I1002 12:15:03.792557 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29323455-ccbsl" event={"ID":"52565495-6a10-4185-b4d4-e303d9d7e623","Type":"ContainerDied","Data":"6c6b97b81d044bbd7264db89f40d5457b6369c5c51b64afb271cd8b8397b759e"} Oct 02 12:15:03 crc kubenswrapper[4783]: I1002 12:15:03.792603 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6c6b97b81d044bbd7264db89f40d5457b6369c5c51b64afb271cd8b8397b759e" Oct 02 12:15:03 crc kubenswrapper[4783]: I1002 12:15:03.792644 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29323455-ccbsl" Oct 02 12:15:04 crc kubenswrapper[4783]: I1002 12:15:04.152981 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29323410-6skmf"] Oct 02 12:15:04 crc kubenswrapper[4783]: I1002 12:15:04.164243 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29323410-6skmf"] Oct 02 12:15:05 crc kubenswrapper[4783]: I1002 12:15:05.562762 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="00976817-6948-47a6-9a09-1aa2c5c96052" path="/var/lib/kubelet/pods/00976817-6948-47a6-9a09-1aa2c5c96052/volumes" Oct 02 12:15:51 crc kubenswrapper[4783]: I1002 12:15:51.513288 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 02 12:15:51 crc kubenswrapper[4783]: I1002 12:15:51.513837 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 02 12:15:57 crc kubenswrapper[4783]: I1002 12:15:57.352633 4783 scope.go:117] "RemoveContainer" containerID="ad81ccbdb85c148d0039bc7ecfc41cb9d704f30e17a6b3d4e8c3a51fc1d92375" Oct 02 12:16:21 crc kubenswrapper[4783]: I1002 12:16:21.513822 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 02 12:16:21 crc kubenswrapper[4783]: I1002 12:16:21.514542 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 02 12:16:51 crc kubenswrapper[4783]: I1002 12:16:51.513681 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 02 12:16:51 crc kubenswrapper[4783]: I1002 12:16:51.514256 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 02 12:16:51 crc kubenswrapper[4783]: I1002 12:16:51.514316 4783 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" Oct 02 12:16:51 crc kubenswrapper[4783]: I1002 12:16:51.515365 4783 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" 
containerStatusID={"Type":"cri-o","ID":"e3e4225ed328e5591d066522ee52a27735fed7d1b5cd0007ed03d88c986c058b"} pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 02 12:16:51 crc kubenswrapper[4783]: I1002 12:16:51.515501 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" containerID="cri-o://e3e4225ed328e5591d066522ee52a27735fed7d1b5cd0007ed03d88c986c058b" gracePeriod=600 Oct 02 12:16:51 crc kubenswrapper[4783]: E1002 12:16:51.654952 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 12:16:51 crc kubenswrapper[4783]: I1002 12:16:51.920704 4783 generic.go:334] "Generic (PLEG): container finished" podID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerID="e3e4225ed328e5591d066522ee52a27735fed7d1b5cd0007ed03d88c986c058b" exitCode=0 Oct 02 12:16:51 crc kubenswrapper[4783]: I1002 12:16:51.920744 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" event={"ID":"3288cc82-59a8-408e-8b0e-b5255882b4fb","Type":"ContainerDied","Data":"e3e4225ed328e5591d066522ee52a27735fed7d1b5cd0007ed03d88c986c058b"} Oct 02 12:16:51 crc kubenswrapper[4783]: I1002 12:16:51.920783 4783 scope.go:117] "RemoveContainer" containerID="31c034a19e50d094a581a0f0df605c3e00aa8dd41f6fc7b1675a23f1b1bd8cff" Oct 02 12:16:51 crc kubenswrapper[4783]: I1002 12:16:51.921309 4783 scope.go:117] "RemoveContainer" containerID="e3e4225ed328e5591d066522ee52a27735fed7d1b5cd0007ed03d88c986c058b" Oct 02 12:16:51 crc kubenswrapper[4783]: E1002 12:16:51.921576 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 12:16:57 crc kubenswrapper[4783]: I1002 12:16:57.446871 4783 scope.go:117] "RemoveContainer" containerID="ab38f89326c5f893f452b48d257487f4da87954ef33e38a356c3a938b9d3d5d6" Oct 02 12:16:57 crc kubenswrapper[4783]: I1002 12:16:57.470886 4783 scope.go:117] "RemoveContainer" containerID="a324bd3265e57c298602b44427ef7de53ec2840671abf8087fde27bf2bdc3b2e" Oct 02 12:16:57 crc kubenswrapper[4783]: I1002 12:16:57.496125 4783 scope.go:117] "RemoveContainer" containerID="88996f376f5089e7069830d09f9cec8eb6a520239c5af6eaeab6041f85720cb8" Oct 02 12:17:01 crc kubenswrapper[4783]: I1002 12:17:01.565811 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-97m22"] Oct 02 12:17:01 crc kubenswrapper[4783]: E1002 12:17:01.566882 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="52565495-6a10-4185-b4d4-e303d9d7e623" containerName="collect-profiles" Oct 02 12:17:01 crc kubenswrapper[4783]: I1002 
Oct 02 12:17:01 crc kubenswrapper[4783]: I1002 12:17:01.567285 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="52565495-6a10-4185-b4d4-e303d9d7e623" containerName="collect-profiles"
Oct 02 12:17:01 crc kubenswrapper[4783]: I1002 12:17:01.569544 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-97m22"
Oct 02 12:17:01 crc kubenswrapper[4783]: I1002 12:17:01.571066 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-97m22"]
Oct 02 12:17:01 crc kubenswrapper[4783]: I1002 12:17:01.620197 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xtsm6\" (UniqueName: \"kubernetes.io/projected/4f0f1968-12d7-44f2-a9f5-ab73b439b400-kube-api-access-xtsm6\") pod \"certified-operators-97m22\" (UID: \"4f0f1968-12d7-44f2-a9f5-ab73b439b400\") " pod="openshift-marketplace/certified-operators-97m22"
Oct 02 12:17:01 crc kubenswrapper[4783]: I1002 12:17:01.620308 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4f0f1968-12d7-44f2-a9f5-ab73b439b400-utilities\") pod \"certified-operators-97m22\" (UID: \"4f0f1968-12d7-44f2-a9f5-ab73b439b400\") " pod="openshift-marketplace/certified-operators-97m22"
Oct 02 12:17:01 crc kubenswrapper[4783]: I1002 12:17:01.620483 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4f0f1968-12d7-44f2-a9f5-ab73b439b400-catalog-content\") pod \"certified-operators-97m22\" (UID: \"4f0f1968-12d7-44f2-a9f5-ab73b439b400\") " pod="openshift-marketplace/certified-operators-97m22"
Oct 02 12:17:01 crc kubenswrapper[4783]: I1002 12:17:01.721737 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xtsm6\" (UniqueName: \"kubernetes.io/projected/4f0f1968-12d7-44f2-a9f5-ab73b439b400-kube-api-access-xtsm6\") pod \"certified-operators-97m22\" (UID: \"4f0f1968-12d7-44f2-a9f5-ab73b439b400\") " pod="openshift-marketplace/certified-operators-97m22"
Oct 02 12:17:01 crc kubenswrapper[4783]: I1002 12:17:01.721899 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4f0f1968-12d7-44f2-a9f5-ab73b439b400-utilities\") pod \"certified-operators-97m22\" (UID: \"4f0f1968-12d7-44f2-a9f5-ab73b439b400\") " pod="openshift-marketplace/certified-operators-97m22"
Oct 02 12:17:01 crc kubenswrapper[4783]: I1002 12:17:01.722008 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4f0f1968-12d7-44f2-a9f5-ab73b439b400-catalog-content\") pod \"certified-operators-97m22\" (UID: \"4f0f1968-12d7-44f2-a9f5-ab73b439b400\") " pod="openshift-marketplace/certified-operators-97m22"
Oct 02 12:17:01 crc kubenswrapper[4783]: I1002 12:17:01.722478 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4f0f1968-12d7-44f2-a9f5-ab73b439b400-utilities\") pod \"certified-operators-97m22\" (UID: \"4f0f1968-12d7-44f2-a9f5-ab73b439b400\") " pod="openshift-marketplace/certified-operators-97m22"
Oct 02 12:17:01 crc kubenswrapper[4783]: I1002 12:17:01.722578 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4f0f1968-12d7-44f2-a9f5-ab73b439b400-catalog-content\") pod \"certified-operators-97m22\" (UID: \"4f0f1968-12d7-44f2-a9f5-ab73b439b400\") " pod="openshift-marketplace/certified-operators-97m22"
Oct 02 12:17:01 crc kubenswrapper[4783]: I1002 12:17:01.743094 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xtsm6\" (UniqueName: \"kubernetes.io/projected/4f0f1968-12d7-44f2-a9f5-ab73b439b400-kube-api-access-xtsm6\") pod \"certified-operators-97m22\" (UID: \"4f0f1968-12d7-44f2-a9f5-ab73b439b400\") " pod="openshift-marketplace/certified-operators-97m22"
Oct 02 12:17:01 crc kubenswrapper[4783]: I1002 12:17:01.895973 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-97m22"
Oct 02 12:17:02 crc kubenswrapper[4783]: I1002 12:17:02.230804 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-97m22"]
Oct 02 12:17:03 crc kubenswrapper[4783]: I1002 12:17:03.089756 4783 generic.go:334] "Generic (PLEG): container finished" podID="4f0f1968-12d7-44f2-a9f5-ab73b439b400" containerID="a773d4c7616f5f0bdf8bc65dc9205385794e3ac43e278535a7765e70e205d66e" exitCode=0
Oct 02 12:17:03 crc kubenswrapper[4783]: I1002 12:17:03.089853 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-97m22" event={"ID":"4f0f1968-12d7-44f2-a9f5-ab73b439b400","Type":"ContainerDied","Data":"a773d4c7616f5f0bdf8bc65dc9205385794e3ac43e278535a7765e70e205d66e"}
Oct 02 12:17:03 crc kubenswrapper[4783]: I1002 12:17:03.090526 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-97m22" event={"ID":"4f0f1968-12d7-44f2-a9f5-ab73b439b400","Type":"ContainerStarted","Data":"4c109858c74ed5046aa2f5c3e361d4ece4a38dd506e43a89df656b35059cafdd"}
Oct 02 12:17:03 crc kubenswrapper[4783]: I1002 12:17:03.095632 4783 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Oct 02 12:17:05 crc kubenswrapper[4783]: I1002 12:17:05.117940 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-97m22" event={"ID":"4f0f1968-12d7-44f2-a9f5-ab73b439b400","Type":"ContainerStarted","Data":"340edf34791ea196d0824a72ca79bb35d033b2517a86ad3df8160eafefeb2a53"}
Oct 02 12:17:05 crc kubenswrapper[4783]: I1002 12:17:05.545293 4783 scope.go:117] "RemoveContainer" containerID="e3e4225ed328e5591d066522ee52a27735fed7d1b5cd0007ed03d88c986c058b"
Oct 02 12:17:05 crc kubenswrapper[4783]: E1002 12:17:05.545660 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 12:17:08 crc kubenswrapper[4783]: I1002 12:17:08.153972 4783 generic.go:334] "Generic (PLEG): container finished" podID="4f0f1968-12d7-44f2-a9f5-ab73b439b400" containerID="340edf34791ea196d0824a72ca79bb35d033b2517a86ad3df8160eafefeb2a53" exitCode=0
Oct 02 12:17:08 crc kubenswrapper[4783]: I1002 12:17:08.154049 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-97m22" event={"ID":"4f0f1968-12d7-44f2-a9f5-ab73b439b400","Type":"ContainerDied","Data":"340edf34791ea196d0824a72ca79bb35d033b2517a86ad3df8160eafefeb2a53"}
Oct 02 12:17:10 crc kubenswrapper[4783]: I1002 12:17:10.190824 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-97m22" event={"ID":"4f0f1968-12d7-44f2-a9f5-ab73b439b400","Type":"ContainerStarted","Data":"1214615520c0a9ff043c516b73b63ffbddee49be398ada41d5438a9dfea88957"}
Oct 02 12:17:10 crc kubenswrapper[4783]: I1002 12:17:10.213514 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-97m22" podStartSLOduration=2.668763782 podStartE2EDuration="9.213487055s" podCreationTimestamp="2025-10-02 12:17:01 +0000 UTC" firstStartedPulling="2025-10-02 12:17:03.095189921 +0000 UTC m=+5056.411384222" lastFinishedPulling="2025-10-02 12:17:09.639913194 +0000 UTC m=+5062.956107495" observedRunningTime="2025-10-02 12:17:10.209281351 +0000 UTC m=+5063.525475602" watchObservedRunningTime="2025-10-02 12:17:10.213487055 +0000 UTC m=+5063.529681316"
Oct 02 12:17:11 crc kubenswrapper[4783]: I1002 12:17:11.896148 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-97m22"
Oct 02 12:17:11 crc kubenswrapper[4783]: I1002 12:17:11.897534 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-97m22"
Oct 02 12:17:11 crc kubenswrapper[4783]: I1002 12:17:11.958822 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-97m22"
Oct 02 12:17:18 crc kubenswrapper[4783]: I1002 12:17:18.545027 4783 scope.go:117] "RemoveContainer" containerID="e3e4225ed328e5591d066522ee52a27735fed7d1b5cd0007ed03d88c986c058b"
Oct 02 12:17:18 crc kubenswrapper[4783]: E1002 12:17:18.546126 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 12:17:21 crc kubenswrapper[4783]: I1002 12:17:21.953585 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-97m22"
Oct 02 12:17:21 crc kubenswrapper[4783]: I1002 12:17:21.998562 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-97m22"]
Oct 02 12:17:22 crc kubenswrapper[4783]: I1002 12:17:22.324954 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-97m22" podUID="4f0f1968-12d7-44f2-a9f5-ab73b439b400" containerName="registry-server" containerID="cri-o://1214615520c0a9ff043c516b73b63ffbddee49be398ada41d5438a9dfea88957" gracePeriod=2
Oct 02 12:17:23 crc kubenswrapper[4783]: I1002 12:17:23.351550 4783 generic.go:334] "Generic (PLEG): container finished" podID="4f0f1968-12d7-44f2-a9f5-ab73b439b400" containerID="1214615520c0a9ff043c516b73b63ffbddee49be398ada41d5438a9dfea88957" exitCode=0
Oct 02 12:17:23 crc kubenswrapper[4783]: I1002 12:17:23.351643 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-97m22" event={"ID":"4f0f1968-12d7-44f2-a9f5-ab73b439b400","Type":"ContainerDied","Data":"1214615520c0a9ff043c516b73b63ffbddee49be398ada41d5438a9dfea88957"}
pod="openshift-marketplace/certified-operators-97m22" event={"ID":"4f0f1968-12d7-44f2-a9f5-ab73b439b400","Type":"ContainerDied","Data":"1214615520c0a9ff043c516b73b63ffbddee49be398ada41d5438a9dfea88957"} Oct 02 12:17:23 crc kubenswrapper[4783]: I1002 12:17:23.420296 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-97m22" Oct 02 12:17:23 crc kubenswrapper[4783]: I1002 12:17:23.578534 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xtsm6\" (UniqueName: \"kubernetes.io/projected/4f0f1968-12d7-44f2-a9f5-ab73b439b400-kube-api-access-xtsm6\") pod \"4f0f1968-12d7-44f2-a9f5-ab73b439b400\" (UID: \"4f0f1968-12d7-44f2-a9f5-ab73b439b400\") " Oct 02 12:17:23 crc kubenswrapper[4783]: I1002 12:17:23.578602 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4f0f1968-12d7-44f2-a9f5-ab73b439b400-catalog-content\") pod \"4f0f1968-12d7-44f2-a9f5-ab73b439b400\" (UID: \"4f0f1968-12d7-44f2-a9f5-ab73b439b400\") " Oct 02 12:17:23 crc kubenswrapper[4783]: I1002 12:17:23.578645 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4f0f1968-12d7-44f2-a9f5-ab73b439b400-utilities\") pod \"4f0f1968-12d7-44f2-a9f5-ab73b439b400\" (UID: \"4f0f1968-12d7-44f2-a9f5-ab73b439b400\") " Oct 02 12:17:23 crc kubenswrapper[4783]: I1002 12:17:23.580116 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4f0f1968-12d7-44f2-a9f5-ab73b439b400-utilities" (OuterVolumeSpecName: "utilities") pod "4f0f1968-12d7-44f2-a9f5-ab73b439b400" (UID: "4f0f1968-12d7-44f2-a9f5-ab73b439b400"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 12:17:23 crc kubenswrapper[4783]: I1002 12:17:23.583356 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4f0f1968-12d7-44f2-a9f5-ab73b439b400-kube-api-access-xtsm6" (OuterVolumeSpecName: "kube-api-access-xtsm6") pod "4f0f1968-12d7-44f2-a9f5-ab73b439b400" (UID: "4f0f1968-12d7-44f2-a9f5-ab73b439b400"). InnerVolumeSpecName "kube-api-access-xtsm6". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 12:17:23 crc kubenswrapper[4783]: I1002 12:17:23.630107 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4f0f1968-12d7-44f2-a9f5-ab73b439b400-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4f0f1968-12d7-44f2-a9f5-ab73b439b400" (UID: "4f0f1968-12d7-44f2-a9f5-ab73b439b400"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 12:17:23 crc kubenswrapper[4783]: I1002 12:17:23.681454 4783 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4f0f1968-12d7-44f2-a9f5-ab73b439b400-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 02 12:17:23 crc kubenswrapper[4783]: I1002 12:17:23.681488 4783 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4f0f1968-12d7-44f2-a9f5-ab73b439b400-utilities\") on node \"crc\" DevicePath \"\"" Oct 02 12:17:23 crc kubenswrapper[4783]: I1002 12:17:23.681501 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xtsm6\" (UniqueName: \"kubernetes.io/projected/4f0f1968-12d7-44f2-a9f5-ab73b439b400-kube-api-access-xtsm6\") on node \"crc\" DevicePath \"\"" Oct 02 12:17:24 crc kubenswrapper[4783]: I1002 12:17:24.364926 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-97m22" event={"ID":"4f0f1968-12d7-44f2-a9f5-ab73b439b400","Type":"ContainerDied","Data":"4c109858c74ed5046aa2f5c3e361d4ece4a38dd506e43a89df656b35059cafdd"} Oct 02 12:17:24 crc kubenswrapper[4783]: I1002 12:17:24.364989 4783 scope.go:117] "RemoveContainer" containerID="1214615520c0a9ff043c516b73b63ffbddee49be398ada41d5438a9dfea88957" Oct 02 12:17:24 crc kubenswrapper[4783]: I1002 12:17:24.365046 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-97m22" Oct 02 12:17:24 crc kubenswrapper[4783]: I1002 12:17:24.384029 4783 scope.go:117] "RemoveContainer" containerID="340edf34791ea196d0824a72ca79bb35d033b2517a86ad3df8160eafefeb2a53" Oct 02 12:17:24 crc kubenswrapper[4783]: I1002 12:17:24.419178 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-97m22"] Oct 02 12:17:24 crc kubenswrapper[4783]: I1002 12:17:24.426367 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-97m22"] Oct 02 12:17:24 crc kubenswrapper[4783]: I1002 12:17:24.639829 4783 scope.go:117] "RemoveContainer" containerID="a773d4c7616f5f0bdf8bc65dc9205385794e3ac43e278535a7765e70e205d66e" Oct 02 12:17:25 crc kubenswrapper[4783]: I1002 12:17:25.563599 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4f0f1968-12d7-44f2-a9f5-ab73b439b400" path="/var/lib/kubelet/pods/4f0f1968-12d7-44f2-a9f5-ab73b439b400/volumes" Oct 02 12:17:29 crc kubenswrapper[4783]: I1002 12:17:29.544628 4783 scope.go:117] "RemoveContainer" containerID="e3e4225ed328e5591d066522ee52a27735fed7d1b5cd0007ed03d88c986c058b" Oct 02 12:17:29 crc kubenswrapper[4783]: E1002 12:17:29.545834 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 12:17:40 crc kubenswrapper[4783]: I1002 12:17:40.544565 4783 scope.go:117] "RemoveContainer" containerID="e3e4225ed328e5591d066522ee52a27735fed7d1b5cd0007ed03d88c986c058b" Oct 02 12:17:40 crc kubenswrapper[4783]: E1002 12:17:40.545580 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" 
with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 12:17:52 crc kubenswrapper[4783]: I1002 12:17:52.545366 4783 scope.go:117] "RemoveContainer" containerID="e3e4225ed328e5591d066522ee52a27735fed7d1b5cd0007ed03d88c986c058b" Oct 02 12:17:52 crc kubenswrapper[4783]: E1002 12:17:52.546175 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 12:18:07 crc kubenswrapper[4783]: I1002 12:18:07.559492 4783 scope.go:117] "RemoveContainer" containerID="e3e4225ed328e5591d066522ee52a27735fed7d1b5cd0007ed03d88c986c058b" Oct 02 12:18:07 crc kubenswrapper[4783]: E1002 12:18:07.560204 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 12:18:21 crc kubenswrapper[4783]: I1002 12:18:21.545493 4783 scope.go:117] "RemoveContainer" containerID="e3e4225ed328e5591d066522ee52a27735fed7d1b5cd0007ed03d88c986c058b" Oct 02 12:18:21 crc kubenswrapper[4783]: E1002 12:18:21.546592 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 12:18:32 crc kubenswrapper[4783]: I1002 12:18:32.545604 4783 scope.go:117] "RemoveContainer" containerID="e3e4225ed328e5591d066522ee52a27735fed7d1b5cd0007ed03d88c986c058b" Oct 02 12:18:32 crc kubenswrapper[4783]: E1002 12:18:32.546551 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 12:18:43 crc kubenswrapper[4783]: I1002 12:18:43.544974 4783 scope.go:117] "RemoveContainer" containerID="e3e4225ed328e5591d066522ee52a27735fed7d1b5cd0007ed03d88c986c058b" Oct 02 12:18:43 crc kubenswrapper[4783]: E1002 12:18:43.545993 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 12:18:58 crc kubenswrapper[4783]: I1002 12:18:58.545407 4783 scope.go:117] "RemoveContainer" containerID="e3e4225ed328e5591d066522ee52a27735fed7d1b5cd0007ed03d88c986c058b" Oct 02 12:18:58 crc kubenswrapper[4783]: E1002 12:18:58.546114 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 12:19:09 crc kubenswrapper[4783]: I1002 12:19:09.546360 4783 scope.go:117] "RemoveContainer" containerID="e3e4225ed328e5591d066522ee52a27735fed7d1b5cd0007ed03d88c986c058b" Oct 02 12:19:09 crc kubenswrapper[4783]: E1002 12:19:09.547621 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 12:19:24 crc kubenswrapper[4783]: I1002 12:19:24.545330 4783 scope.go:117] "RemoveContainer" containerID="e3e4225ed328e5591d066522ee52a27735fed7d1b5cd0007ed03d88c986c058b" Oct 02 12:19:24 crc kubenswrapper[4783]: E1002 12:19:24.546426 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 12:19:39 crc kubenswrapper[4783]: I1002 12:19:39.545546 4783 scope.go:117] "RemoveContainer" containerID="e3e4225ed328e5591d066522ee52a27735fed7d1b5cd0007ed03d88c986c058b" Oct 02 12:19:39 crc kubenswrapper[4783]: E1002 12:19:39.546501 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 12:19:51 crc kubenswrapper[4783]: I1002 12:19:51.544792 4783 scope.go:117] "RemoveContainer" containerID="e3e4225ed328e5591d066522ee52a27735fed7d1b5cd0007ed03d88c986c058b" Oct 02 12:19:51 crc kubenswrapper[4783]: E1002 12:19:51.547374 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" 
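Between 12:17:29 and 12:19:51 the pod worker retries roughly every 11 to 15 seconds and is refused each time with the same "back-off 5m0s" message: the crash-loop backoff has already saturated at its cap. A small sketch of the doubling schedule that produces such a cap, assuming the upstream kubelet defaults of a 10s base and 300s maximum (an assumption about this build, not read from the log):

// Sketch: exponential restart backoff doubling from a base to a cap,
// mirroring the schedule implied by the "back-off 5m0s" messages above.
package main

import (
	"fmt"
	"time"
)

func backoffSeries(base, limit time.Duration) []time.Duration {
	var out []time.Duration
	for d := base; ; d *= 2 {
		if d >= limit {
			out = append(out, limit) // clamped; the delay stays at the cap thereafter
			return out
		}
		out = append(out, d)
	}
}

func main() {
	// Prints: 10s 20s 40s 1m20s 2m40s 5m0s
	for _, d := range backoffSeries(10*time.Second, 5*time.Minute) {
		fmt.Print(d, " ")
	}
	fmt.Println()
}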
podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 12:20:06 crc kubenswrapper[4783]: I1002 12:20:06.544568 4783 scope.go:117] "RemoveContainer" containerID="e3e4225ed328e5591d066522ee52a27735fed7d1b5cd0007ed03d88c986c058b" Oct 02 12:20:06 crc kubenswrapper[4783]: E1002 12:20:06.545406 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 12:20:15 crc kubenswrapper[4783]: I1002 12:20:15.168252 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-52dnv/must-gather-q5nb7"] Oct 02 12:20:15 crc kubenswrapper[4783]: E1002 12:20:15.170658 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f0f1968-12d7-44f2-a9f5-ab73b439b400" containerName="registry-server" Oct 02 12:20:15 crc kubenswrapper[4783]: I1002 12:20:15.170741 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f0f1968-12d7-44f2-a9f5-ab73b439b400" containerName="registry-server" Oct 02 12:20:15 crc kubenswrapper[4783]: E1002 12:20:15.170810 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f0f1968-12d7-44f2-a9f5-ab73b439b400" containerName="extract-content" Oct 02 12:20:15 crc kubenswrapper[4783]: I1002 12:20:15.170871 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f0f1968-12d7-44f2-a9f5-ab73b439b400" containerName="extract-content" Oct 02 12:20:15 crc kubenswrapper[4783]: E1002 12:20:15.170939 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4f0f1968-12d7-44f2-a9f5-ab73b439b400" containerName="extract-utilities" Oct 02 12:20:15 crc kubenswrapper[4783]: I1002 12:20:15.170992 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="4f0f1968-12d7-44f2-a9f5-ab73b439b400" containerName="extract-utilities" Oct 02 12:20:15 crc kubenswrapper[4783]: I1002 12:20:15.171207 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="4f0f1968-12d7-44f2-a9f5-ab73b439b400" containerName="registry-server" Oct 02 12:20:15 crc kubenswrapper[4783]: I1002 12:20:15.172200 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-52dnv/must-gather-q5nb7" Oct 02 12:20:15 crc kubenswrapper[4783]: I1002 12:20:15.174641 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-52dnv"/"kube-root-ca.crt" Oct 02 12:20:15 crc kubenswrapper[4783]: I1002 12:20:15.174641 4783 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-52dnv"/"openshift-service-ca.crt" Oct 02 12:20:15 crc kubenswrapper[4783]: I1002 12:20:15.174705 4783 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-52dnv"/"default-dockercfg-bk85f" Oct 02 12:20:15 crc kubenswrapper[4783]: I1002 12:20:15.186448 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-52dnv/must-gather-q5nb7"] Oct 02 12:20:15 crc kubenswrapper[4783]: I1002 12:20:15.229692 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/d11528d7-0229-43e7-90fe-cd6f19b61a2a-must-gather-output\") pod \"must-gather-q5nb7\" (UID: \"d11528d7-0229-43e7-90fe-cd6f19b61a2a\") " pod="openshift-must-gather-52dnv/must-gather-q5nb7" Oct 02 12:20:15 crc kubenswrapper[4783]: I1002 12:20:15.229779 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kcxkc\" (UniqueName: \"kubernetes.io/projected/d11528d7-0229-43e7-90fe-cd6f19b61a2a-kube-api-access-kcxkc\") pod \"must-gather-q5nb7\" (UID: \"d11528d7-0229-43e7-90fe-cd6f19b61a2a\") " pod="openshift-must-gather-52dnv/must-gather-q5nb7" Oct 02 12:20:15 crc kubenswrapper[4783]: I1002 12:20:15.332077 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/d11528d7-0229-43e7-90fe-cd6f19b61a2a-must-gather-output\") pod \"must-gather-q5nb7\" (UID: \"d11528d7-0229-43e7-90fe-cd6f19b61a2a\") " pod="openshift-must-gather-52dnv/must-gather-q5nb7" Oct 02 12:20:15 crc kubenswrapper[4783]: I1002 12:20:15.332356 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kcxkc\" (UniqueName: \"kubernetes.io/projected/d11528d7-0229-43e7-90fe-cd6f19b61a2a-kube-api-access-kcxkc\") pod \"must-gather-q5nb7\" (UID: \"d11528d7-0229-43e7-90fe-cd6f19b61a2a\") " pod="openshift-must-gather-52dnv/must-gather-q5nb7" Oct 02 12:20:15 crc kubenswrapper[4783]: I1002 12:20:15.332529 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/d11528d7-0229-43e7-90fe-cd6f19b61a2a-must-gather-output\") pod \"must-gather-q5nb7\" (UID: \"d11528d7-0229-43e7-90fe-cd6f19b61a2a\") " pod="openshift-must-gather-52dnv/must-gather-q5nb7" Oct 02 12:20:15 crc kubenswrapper[4783]: I1002 12:20:15.356238 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kcxkc\" (UniqueName: \"kubernetes.io/projected/d11528d7-0229-43e7-90fe-cd6f19b61a2a-kube-api-access-kcxkc\") pod \"must-gather-q5nb7\" (UID: \"d11528d7-0229-43e7-90fe-cd6f19b61a2a\") " pod="openshift-must-gather-52dnv/must-gather-q5nb7" Oct 02 12:20:15 crc kubenswrapper[4783]: I1002 12:20:15.546990 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-52dnv/must-gather-q5nb7" Oct 02 12:20:16 crc kubenswrapper[4783]: I1002 12:20:16.053996 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-52dnv/must-gather-q5nb7"] Oct 02 12:20:16 crc kubenswrapper[4783]: I1002 12:20:16.075291 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-52dnv/must-gather-q5nb7" event={"ID":"d11528d7-0229-43e7-90fe-cd6f19b61a2a","Type":"ContainerStarted","Data":"d794775e814d61590cba22bb85b91de3ef875ed33a3538557c2f0d7f89bb9014"} Oct 02 12:20:18 crc kubenswrapper[4783]: I1002 12:20:18.545273 4783 scope.go:117] "RemoveContainer" containerID="e3e4225ed328e5591d066522ee52a27735fed7d1b5cd0007ed03d88c986c058b" Oct 02 12:20:18 crc kubenswrapper[4783]: E1002 12:20:18.546376 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 12:20:25 crc kubenswrapper[4783]: I1002 12:20:25.175589 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-52dnv/must-gather-q5nb7" event={"ID":"d11528d7-0229-43e7-90fe-cd6f19b61a2a","Type":"ContainerStarted","Data":"1fb607cf03fe7a8e098a6a306e2cd32833abde66196a494d581e8440725393e2"} Oct 02 12:20:26 crc kubenswrapper[4783]: I1002 12:20:26.189008 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-52dnv/must-gather-q5nb7" event={"ID":"d11528d7-0229-43e7-90fe-cd6f19b61a2a","Type":"ContainerStarted","Data":"9e78455863d87355ee007a76a8522b2666bc84cc30d540092f839d488bbccdcc"} Oct 02 12:20:26 crc kubenswrapper[4783]: I1002 12:20:26.210314 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-52dnv/must-gather-q5nb7" podStartSLOduration=2.460575678 podStartE2EDuration="11.210294567s" podCreationTimestamp="2025-10-02 12:20:15 +0000 UTC" firstStartedPulling="2025-10-02 12:20:16.058320816 +0000 UTC m=+5249.374515077" lastFinishedPulling="2025-10-02 12:20:24.808039705 +0000 UTC m=+5258.124233966" observedRunningTime="2025-10-02 12:20:26.204028576 +0000 UTC m=+5259.520222847" watchObservedRunningTime="2025-10-02 12:20:26.210294567 +0000 UTC m=+5259.526488828" Oct 02 12:20:30 crc kubenswrapper[4783]: I1002 12:20:30.686378 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-52dnv/crc-debug-nv4h7"] Oct 02 12:20:30 crc kubenswrapper[4783]: I1002 12:20:30.688085 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-52dnv/crc-debug-nv4h7" Oct 02 12:20:30 crc kubenswrapper[4783]: I1002 12:20:30.778864 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p5brc\" (UniqueName: \"kubernetes.io/projected/02f9b876-95e8-49ed-89ca-b51c262a7cdc-kube-api-access-p5brc\") pod \"crc-debug-nv4h7\" (UID: \"02f9b876-95e8-49ed-89ca-b51c262a7cdc\") " pod="openshift-must-gather-52dnv/crc-debug-nv4h7" Oct 02 12:20:30 crc kubenswrapper[4783]: I1002 12:20:30.778953 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/02f9b876-95e8-49ed-89ca-b51c262a7cdc-host\") pod \"crc-debug-nv4h7\" (UID: \"02f9b876-95e8-49ed-89ca-b51c262a7cdc\") " pod="openshift-must-gather-52dnv/crc-debug-nv4h7" Oct 02 12:20:30 crc kubenswrapper[4783]: I1002 12:20:30.880792 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/02f9b876-95e8-49ed-89ca-b51c262a7cdc-host\") pod \"crc-debug-nv4h7\" (UID: \"02f9b876-95e8-49ed-89ca-b51c262a7cdc\") " pod="openshift-must-gather-52dnv/crc-debug-nv4h7" Oct 02 12:20:30 crc kubenswrapper[4783]: I1002 12:20:30.880931 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p5brc\" (UniqueName: \"kubernetes.io/projected/02f9b876-95e8-49ed-89ca-b51c262a7cdc-kube-api-access-p5brc\") pod \"crc-debug-nv4h7\" (UID: \"02f9b876-95e8-49ed-89ca-b51c262a7cdc\") " pod="openshift-must-gather-52dnv/crc-debug-nv4h7" Oct 02 12:20:30 crc kubenswrapper[4783]: I1002 12:20:30.881245 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/02f9b876-95e8-49ed-89ca-b51c262a7cdc-host\") pod \"crc-debug-nv4h7\" (UID: \"02f9b876-95e8-49ed-89ca-b51c262a7cdc\") " pod="openshift-must-gather-52dnv/crc-debug-nv4h7" Oct 02 12:20:30 crc kubenswrapper[4783]: I1002 12:20:30.899049 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p5brc\" (UniqueName: \"kubernetes.io/projected/02f9b876-95e8-49ed-89ca-b51c262a7cdc-kube-api-access-p5brc\") pod \"crc-debug-nv4h7\" (UID: \"02f9b876-95e8-49ed-89ca-b51c262a7cdc\") " pod="openshift-must-gather-52dnv/crc-debug-nv4h7" Oct 02 12:20:31 crc kubenswrapper[4783]: I1002 12:20:31.014888 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-52dnv/crc-debug-nv4h7" Oct 02 12:20:32 crc kubenswrapper[4783]: I1002 12:20:32.255405 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-52dnv/crc-debug-nv4h7" event={"ID":"02f9b876-95e8-49ed-89ca-b51c262a7cdc","Type":"ContainerStarted","Data":"def643acb7a92eee8d13ea2b6bfcd969e1f0025643e63fd01024a6a53fbc361e"} Oct 02 12:20:33 crc kubenswrapper[4783]: I1002 12:20:33.544989 4783 scope.go:117] "RemoveContainer" containerID="e3e4225ed328e5591d066522ee52a27735fed7d1b5cd0007ed03d88c986c058b" Oct 02 12:20:33 crc kubenswrapper[4783]: E1002 12:20:33.545760 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 12:20:45 crc kubenswrapper[4783]: I1002 12:20:45.387553 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-52dnv/crc-debug-nv4h7" event={"ID":"02f9b876-95e8-49ed-89ca-b51c262a7cdc","Type":"ContainerStarted","Data":"1c453a9b6f515cc1a7532dce0ec4a7ea7fe3a5db7ab58deabdf5cfbb6f485707"} Oct 02 12:20:45 crc kubenswrapper[4783]: I1002 12:20:45.416804 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-52dnv/crc-debug-nv4h7" podStartSLOduration=2.197809412 podStartE2EDuration="15.416780667s" podCreationTimestamp="2025-10-02 12:20:30 +0000 UTC" firstStartedPulling="2025-10-02 12:20:31.249124057 +0000 UTC m=+5264.565318348" lastFinishedPulling="2025-10-02 12:20:44.468095342 +0000 UTC m=+5277.784289603" observedRunningTime="2025-10-02 12:20:45.406624651 +0000 UTC m=+5278.722818922" watchObservedRunningTime="2025-10-02 12:20:45.416780667 +0000 UTC m=+5278.732974938" Oct 02 12:20:48 crc kubenswrapper[4783]: I1002 12:20:48.545872 4783 scope.go:117] "RemoveContainer" containerID="e3e4225ed328e5591d066522ee52a27735fed7d1b5cd0007ed03d88c986c058b" Oct 02 12:20:48 crc kubenswrapper[4783]: E1002 12:20:48.546660 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 12:21:02 crc kubenswrapper[4783]: I1002 12:21:02.544774 4783 scope.go:117] "RemoveContainer" containerID="e3e4225ed328e5591d066522ee52a27735fed7d1b5cd0007ed03d88c986c058b" Oct 02 12:21:02 crc kubenswrapper[4783]: E1002 12:21:02.545852 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 12:21:16 crc kubenswrapper[4783]: I1002 12:21:16.545295 4783 scope.go:117] "RemoveContainer" containerID="e3e4225ed328e5591d066522ee52a27735fed7d1b5cd0007ed03d88c986c058b" Oct 02 
Oct 02 12:21:16 crc kubenswrapper[4783]: E1002 12:21:16.546218 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 12:21:26 crc kubenswrapper[4783]: I1002 12:21:26.555678 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-pvfql"]
Oct 02 12:21:26 crc kubenswrapper[4783]: I1002 12:21:26.558192 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pvfql"
Oct 02 12:21:26 crc kubenswrapper[4783]: I1002 12:21:26.571222 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pvfql"]
Oct 02 12:21:26 crc kubenswrapper[4783]: I1002 12:21:26.755894 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wgnls\" (UniqueName: \"kubernetes.io/projected/67e435fd-09ce-4d8d-aca3-4733bae6d63a-kube-api-access-wgnls\") pod \"redhat-operators-pvfql\" (UID: \"67e435fd-09ce-4d8d-aca3-4733bae6d63a\") " pod="openshift-marketplace/redhat-operators-pvfql"
Oct 02 12:21:26 crc kubenswrapper[4783]: I1002 12:21:26.756085 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/67e435fd-09ce-4d8d-aca3-4733bae6d63a-catalog-content\") pod \"redhat-operators-pvfql\" (UID: \"67e435fd-09ce-4d8d-aca3-4733bae6d63a\") " pod="openshift-marketplace/redhat-operators-pvfql"
Oct 02 12:21:26 crc kubenswrapper[4783]: I1002 12:21:26.756124 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/67e435fd-09ce-4d8d-aca3-4733bae6d63a-utilities\") pod \"redhat-operators-pvfql\" (UID: \"67e435fd-09ce-4d8d-aca3-4733bae6d63a\") " pod="openshift-marketplace/redhat-operators-pvfql"
Oct 02 12:21:26 crc kubenswrapper[4783]: I1002 12:21:26.858655 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/67e435fd-09ce-4d8d-aca3-4733bae6d63a-catalog-content\") pod \"redhat-operators-pvfql\" (UID: \"67e435fd-09ce-4d8d-aca3-4733bae6d63a\") " pod="openshift-marketplace/redhat-operators-pvfql"
Oct 02 12:21:26 crc kubenswrapper[4783]: I1002 12:21:26.858702 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/67e435fd-09ce-4d8d-aca3-4733bae6d63a-utilities\") pod \"redhat-operators-pvfql\" (UID: \"67e435fd-09ce-4d8d-aca3-4733bae6d63a\") " pod="openshift-marketplace/redhat-operators-pvfql"
Oct 02 12:21:26 crc kubenswrapper[4783]: I1002 12:21:26.858833 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wgnls\" (UniqueName: \"kubernetes.io/projected/67e435fd-09ce-4d8d-aca3-4733bae6d63a-kube-api-access-wgnls\") pod \"redhat-operators-pvfql\" (UID: \"67e435fd-09ce-4d8d-aca3-4733bae6d63a\") " pod="openshift-marketplace/redhat-operators-pvfql"
Oct 02 12:21:26 crc kubenswrapper[4783]: I1002 12:21:26.859264 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/67e435fd-09ce-4d8d-aca3-4733bae6d63a-catalog-content\") pod \"redhat-operators-pvfql\" (UID: \"67e435fd-09ce-4d8d-aca3-4733bae6d63a\") " pod="openshift-marketplace/redhat-operators-pvfql"
Oct 02 12:21:26 crc kubenswrapper[4783]: I1002 12:21:26.936521 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/67e435fd-09ce-4d8d-aca3-4733bae6d63a-utilities\") pod \"redhat-operators-pvfql\" (UID: \"67e435fd-09ce-4d8d-aca3-4733bae6d63a\") " pod="openshift-marketplace/redhat-operators-pvfql"
Oct 02 12:21:26 crc kubenswrapper[4783]: I1002 12:21:26.949151 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wgnls\" (UniqueName: \"kubernetes.io/projected/67e435fd-09ce-4d8d-aca3-4733bae6d63a-kube-api-access-wgnls\") pod \"redhat-operators-pvfql\" (UID: \"67e435fd-09ce-4d8d-aca3-4733bae6d63a\") " pod="openshift-marketplace/redhat-operators-pvfql"
Oct 02 12:21:27 crc kubenswrapper[4783]: I1002 12:21:27.200738 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pvfql"
Oct 02 12:21:30 crc kubenswrapper[4783]: I1002 12:21:30.545060 4783 scope.go:117] "RemoveContainer" containerID="e3e4225ed328e5591d066522ee52a27735fed7d1b5cd0007ed03d88c986c058b"
Oct 02 12:21:30 crc kubenswrapper[4783]: E1002 12:21:30.546465 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 12:21:30 crc kubenswrapper[4783]: I1002 12:21:30.863080 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pvfql"]
Oct 02 12:21:31 crc kubenswrapper[4783]: I1002 12:21:31.840062 4783 generic.go:334] "Generic (PLEG): container finished" podID="67e435fd-09ce-4d8d-aca3-4733bae6d63a" containerID="684deda65c7405d4a52df89210dcad85abdbabc8891da5fce424be6c0657ee07" exitCode=0
Oct 02 12:21:31 crc kubenswrapper[4783]: I1002 12:21:31.840648 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pvfql" event={"ID":"67e435fd-09ce-4d8d-aca3-4733bae6d63a","Type":"ContainerDied","Data":"684deda65c7405d4a52df89210dcad85abdbabc8891da5fce424be6c0657ee07"}
Oct 02 12:21:31 crc kubenswrapper[4783]: I1002 12:21:31.840693 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pvfql" event={"ID":"67e435fd-09ce-4d8d-aca3-4733bae6d63a","Type":"ContainerStarted","Data":"7b0152f6b44813f7a2c1d8743819e539a9d5e74590a6440d200efccde442eb28"}
Oct 02 12:21:33 crc kubenswrapper[4783]: I1002 12:21:33.862387 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pvfql" event={"ID":"67e435fd-09ce-4d8d-aca3-4733bae6d63a","Type":"ContainerStarted","Data":"6cb3b71c83e709236561cbfcd1fa0996a572385af835f1ce8aaaebb718c59f83"}
Oct 02 12:21:44 crc kubenswrapper[4783]: I1002 12:21:44.544511 4783 scope.go:117] "RemoveContainer" containerID="e3e4225ed328e5591d066522ee52a27735fed7d1b5cd0007ed03d88c986c058b"
Oct 02 12:21:44 crc kubenswrapper[4783]: E1002 12:21:44.545240 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"
Oct 02 12:21:45 crc kubenswrapper[4783]: I1002 12:21:45.975168 4783 generic.go:334] "Generic (PLEG): container finished" podID="67e435fd-09ce-4d8d-aca3-4733bae6d63a" containerID="6cb3b71c83e709236561cbfcd1fa0996a572385af835f1ce8aaaebb718c59f83" exitCode=0
Oct 02 12:21:45 crc kubenswrapper[4783]: I1002 12:21:45.975229 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pvfql" event={"ID":"67e435fd-09ce-4d8d-aca3-4733bae6d63a","Type":"ContainerDied","Data":"6cb3b71c83e709236561cbfcd1fa0996a572385af835f1ce8aaaebb718c59f83"}
Oct 02 12:21:46 crc kubenswrapper[4783]: I1002 12:21:46.985924 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pvfql" event={"ID":"67e435fd-09ce-4d8d-aca3-4733bae6d63a","Type":"ContainerStarted","Data":"ef01e589680ee02cfda0cff8726d49c15b26af97226730227f469a0ca7ff162d"}
Oct 02 12:21:47 crc kubenswrapper[4783]: I1002 12:21:47.010536 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-pvfql" podStartSLOduration=6.406719039 podStartE2EDuration="21.010507333s" podCreationTimestamp="2025-10-02 12:21:26 +0000 UTC" firstStartedPulling="2025-10-02 12:21:31.844379523 +0000 UTC m=+5325.160573794" lastFinishedPulling="2025-10-02 12:21:46.448167827 +0000 UTC m=+5339.764362088" observedRunningTime="2025-10-02 12:21:47.003165183 +0000 UTC m=+5340.319359454" watchObservedRunningTime="2025-10-02 12:21:47.010507333 +0000 UTC m=+5340.326701594"
Oct 02 12:21:47 crc kubenswrapper[4783]: I1002 12:21:47.201158 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-pvfql"
Oct 02 12:21:47 crc kubenswrapper[4783]: I1002 12:21:47.201210 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-pvfql"
Oct 02 12:21:48 crc kubenswrapper[4783]: I1002 12:21:48.254676 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-pvfql" podUID="67e435fd-09ce-4d8d-aca3-4733bae6d63a" containerName="registry-server" probeResult="failure" output=<
Oct 02 12:21:48 crc kubenswrapper[4783]: timeout: failed to connect service ":50051" within 1s
Oct 02 12:21:48 crc kubenswrapper[4783]: >
Oct 02 12:21:56 crc kubenswrapper[4783]: I1002 12:21:56.544932 4783 scope.go:117] "RemoveContainer" containerID="e3e4225ed328e5591d066522ee52a27735fed7d1b5cd0007ed03d88c986c058b"
Oct 02 12:21:57 crc kubenswrapper[4783]: I1002 12:21:57.082383 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" event={"ID":"3288cc82-59a8-408e-8b0e-b5255882b4fb","Type":"ContainerStarted","Data":"670704b352d07cf874134954cd2644f7bff5d175ece897d3e20733251f4c874c"}
Oct 02 12:21:58 crc kubenswrapper[4783]: I1002 12:21:58.275736 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-pvfql" podUID="67e435fd-09ce-4d8d-aca3-4733bae6d63a" containerName="registry-server" probeResult="failure" output=<
Oct 02 12:21:58 crc kubenswrapper[4783]: timeout: failed to connect service ":50051" within 1s
Oct 02 12:21:58 crc kubenswrapper[4783]: >
to connect service ":50051" within 1s Oct 02 12:21:58 crc kubenswrapper[4783]: > Oct 02 12:22:08 crc kubenswrapper[4783]: I1002 12:22:08.252141 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-pvfql" podUID="67e435fd-09ce-4d8d-aca3-4733bae6d63a" containerName="registry-server" probeResult="failure" output=< Oct 02 12:22:08 crc kubenswrapper[4783]: timeout: failed to connect service ":50051" within 1s Oct 02 12:22:08 crc kubenswrapper[4783]: > Oct 02 12:22:11 crc kubenswrapper[4783]: I1002 12:22:11.238903 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-5bb7dfc68d-7dl59_9f9ca941-0e76-41d9-b0c0-7a4dada2640b/barbican-api-log/0.log" Oct 02 12:22:11 crc kubenswrapper[4783]: I1002 12:22:11.352027 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-api-5bb7dfc68d-7dl59_9f9ca941-0e76-41d9-b0c0-7a4dada2640b/barbican-api/0.log" Oct 02 12:22:11 crc kubenswrapper[4783]: I1002 12:22:11.588657 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-6864f89bf8-2zjrd_a5f3639d-51fc-440a-819f-2cbcc93adda0/barbican-keystone-listener/0.log" Oct 02 12:22:11 crc kubenswrapper[4783]: I1002 12:22:11.706077 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-keystone-listener-6864f89bf8-2zjrd_a5f3639d-51fc-440a-819f-2cbcc93adda0/barbican-keystone-listener-log/0.log" Oct 02 12:22:11 crc kubenswrapper[4783]: I1002 12:22:11.832509 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-7485d5d8b5-7jbpr_47c9e784-ab99-406c-9dd6-c0b10742349e/barbican-worker/0.log" Oct 02 12:22:11 crc kubenswrapper[4783]: I1002 12:22:11.976550 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_barbican-worker-7485d5d8b5-7jbpr_47c9e784-ab99-406c-9dd6-c0b10742349e/barbican-worker-log/0.log" Oct 02 12:22:12 crc kubenswrapper[4783]: I1002 12:22:12.191650 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_bootstrap-edpm-deployment-openstack-edpm-ipam-9mr2x_eca56991-989e-44fc-9bb6-ee52ef352d73/bootstrap-edpm-deployment-openstack-edpm-ipam/0.log" Oct 02 12:22:12 crc kubenswrapper[4783]: I1002 12:22:12.438713 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd/ceilometer-central-agent/0.log" Oct 02 12:22:12 crc kubenswrapper[4783]: I1002 12:22:12.493603 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd/ceilometer-notification-agent/0.log" Oct 02 12:22:12 crc kubenswrapper[4783]: I1002 12:22:12.615799 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd/proxy-httpd/0.log" Oct 02 12:22:12 crc kubenswrapper[4783]: I1002 12:22:12.691049 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ceilometer-0_fbc080e2-1a84-43b9-8c3c-d6ef8b25a8cd/sg-core/0.log" Oct 02 12:22:12 crc kubenswrapper[4783]: I1002 12:22:12.991542 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_88339a82-647c-4a4d-a42e-aa70a74c3bd0/cinder-api-log/0.log" Oct 02 12:22:13 crc kubenswrapper[4783]: I1002 12:22:13.023761 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-api-0_88339a82-647c-4a4d-a42e-aa70a74c3bd0/cinder-api/0.log" Oct 02 12:22:13 crc kubenswrapper[4783]: I1002 12:22:13.279921 4783 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_b2e9930a-1339-45f4-9a48-4b4cba57c5b5/cinder-scheduler/0.log" Oct 02 12:22:13 crc kubenswrapper[4783]: I1002 12:22:13.344163 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_cinder-scheduler-0_b2e9930a-1339-45f4-9a48-4b4cba57c5b5/probe/0.log" Oct 02 12:22:13 crc kubenswrapper[4783]: I1002 12:22:13.592942 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-network-edpm-deployment-openstack-edpm-ipam-nsfvn_0a226960-8105-46f9-b2dc-fc4347bec328/configure-network-edpm-deployment-openstack-edpm-ipam/0.log" Oct 02 12:22:13 crc kubenswrapper[4783]: I1002 12:22:13.871842 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-hk5mj_654acc46-b6ca-40e2-a3be-1a6fb20ecd90/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Oct 02 12:22:13 crc kubenswrapper[4783]: I1002 12:22:13.979090 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_configure-os-edpm-deployment-openstack-edpm-ipam-rwwwd_29cd1f95-5f6b-451a-81a4-d78e56e04c43/configure-os-edpm-deployment-openstack-edpm-ipam/0.log" Oct 02 12:22:14 crc kubenswrapper[4783]: I1002 12:22:14.518316 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-6bc556cf6f-vwb79_c3943271-c02d-402e-a2a4-be3a58a82302/init/0.log" Oct 02 12:22:14 crc kubenswrapper[4783]: I1002 12:22:14.774942 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-6bc556cf6f-vwb79_c3943271-c02d-402e-a2a4-be3a58a82302/init/0.log" Oct 02 12:22:14 crc kubenswrapper[4783]: I1002 12:22:14.941895 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_dnsmasq-dns-6bc556cf6f-vwb79_c3943271-c02d-402e-a2a4-be3a58a82302/dnsmasq-dns/0.log" Oct 02 12:22:15 crc kubenswrapper[4783]: I1002 12:22:15.148156 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_download-cache-edpm-deployment-openstack-edpm-ipam-579nn_064976fc-7c96-48ee-9a31-869b82f4b6da/download-cache-edpm-deployment-openstack-edpm-ipam/0.log" Oct 02 12:22:15 crc kubenswrapper[4783]: I1002 12:22:15.216310 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_7efac855-15fd-4a91-bd78-b7ad296dd6b6/glance-httpd/0.log" Oct 02 12:22:15 crc kubenswrapper[4783]: I1002 12:22:15.307385 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-external-api-0_7efac855-15fd-4a91-bd78-b7ad296dd6b6/glance-log/0.log" Oct 02 12:22:15 crc kubenswrapper[4783]: I1002 12:22:15.490910 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_d68ff5d3-015e-4fb4-a1e2-617122af5a45/glance-httpd/0.log" Oct 02 12:22:15 crc kubenswrapper[4783]: I1002 12:22:15.667485 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_glance-default-internal-api-0_d68ff5d3-015e-4fb4-a1e2-617122af5a45/glance-log/0.log" Oct 02 12:22:15 crc kubenswrapper[4783]: I1002 12:22:15.974808 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-567b57d86d-gv6fq_48c11fb6-76f0-4028-a76f-6f67904bf3aa/horizon/3.log" Oct 02 12:22:16 crc kubenswrapper[4783]: I1002 12:22:16.130938 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-567b57d86d-gv6fq_48c11fb6-76f0-4028-a76f-6f67904bf3aa/horizon/2.log" Oct 02 12:22:16 crc kubenswrapper[4783]: I1002 12:22:16.337172 4783 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openstack_install-certs-edpm-deployment-openstack-edpm-ipam-f26lf_3fa822ea-7474-41b4-8203-6089f1eb37cc/install-certs-edpm-deployment-openstack-edpm-ipam/0.log" Oct 02 12:22:16 crc kubenswrapper[4783]: I1002 12:22:16.489454 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_horizon-567b57d86d-gv6fq_48c11fb6-76f0-4028-a76f-6f67904bf3aa/horizon-log/0.log" Oct 02 12:22:16 crc kubenswrapper[4783]: I1002 12:22:16.512073 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_install-os-edpm-deployment-openstack-edpm-ipam-7kjqb_ccf4cc85-e2ac-4332-afb4-9dde935527f0/install-os-edpm-deployment-openstack-edpm-ipam/0.log" Oct 02 12:22:16 crc kubenswrapper[4783]: I1002 12:22:16.736823 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-6c6c7fffd4-bbhdp_2ead5b5c-0e3e-4479-9d0f-affe273fe41d/keystone-api/0.log" Oct 02 12:22:16 crc kubenswrapper[4783]: I1002 12:22:16.820335 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_keystone-cron-29323441-wrfzv_4c76682e-325d-4531-9afb-fcaed9ed292d/keystone-cron/0.log" Oct 02 12:22:17 crc kubenswrapper[4783]: I1002 12:22:17.027592 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_kube-state-metrics-0_31b6e57e-e409-4847-9fd6-86778b935975/kube-state-metrics/0.log" Oct 02 12:22:17 crc kubenswrapper[4783]: I1002 12:22:17.151561 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_libvirt-edpm-deployment-openstack-edpm-ipam-976df_2399815c-c05e-4429-a4e2-163eb5893cc0/libvirt-edpm-deployment-openstack-edpm-ipam/0.log" Oct 02 12:22:17 crc kubenswrapper[4783]: I1002 12:22:17.438375 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-7c59bccbcc-jsxj5_a26366ce-bca3-4af1-93ec-73a5d4a67705/neutron-api/0.log" Oct 02 12:22:17 crc kubenswrapper[4783]: I1002 12:22:17.570523 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-7c59bccbcc-jsxj5_a26366ce-bca3-4af1-93ec-73a5d4a67705/neutron-httpd/0.log" Oct 02 12:22:17 crc kubenswrapper[4783]: I1002 12:22:17.848271 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_neutron-metadata-edpm-deployment-openstack-edpm-ipam-pftxq_271e221f-74f1-4a26-9c0a-1f867d5b56e4/neutron-metadata-edpm-deployment-openstack-edpm-ipam/0.log" Oct 02 12:22:18 crc kubenswrapper[4783]: I1002 12:22:18.247185 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-pvfql" podUID="67e435fd-09ce-4d8d-aca3-4733bae6d63a" containerName="registry-server" probeResult="failure" output=< Oct 02 12:22:18 crc kubenswrapper[4783]: timeout: failed to connect service ":50051" within 1s Oct 02 12:22:18 crc kubenswrapper[4783]: > Oct 02 12:22:18 crc kubenswrapper[4783]: I1002 12:22:18.386696 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_e8bf2ea3-c311-4aa8-becd-9b6b38974a65/nova-api-log/0.log" Oct 02 12:22:18 crc kubenswrapper[4783]: I1002 12:22:18.645429 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell0-conductor-0_88323f1b-d9ae-4044-9a23-455194f06041/nova-cell0-conductor-conductor/0.log" Oct 02 12:22:18 crc kubenswrapper[4783]: I1002 12:22:18.652909 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-api-0_e8bf2ea3-c311-4aa8-becd-9b6b38974a65/nova-api-api/0.log" Oct 02 12:22:19 crc kubenswrapper[4783]: I1002 12:22:19.081809 4783 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_nova-cell1-novncproxy-0_55ed1b2f-a444-43e4-9311-411d81bc052d/nova-cell1-novncproxy-novncproxy/0.log" Oct 02 12:22:19 crc kubenswrapper[4783]: I1002 12:22:19.089975 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-cell1-conductor-0_ac6d24ee-eef2-426e-a186-b24150c6e1e9/nova-cell1-conductor-conductor/0.log" Oct 02 12:22:19 crc kubenswrapper[4783]: I1002 12:22:19.393188 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-8fx67_e68279d9-8229-404b-9d1f-f5963f2e7995/nova-edpm-deployment-openstack-edpm-ipam/0.log" Oct 02 12:22:19 crc kubenswrapper[4783]: I1002 12:22:19.641774 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-fsw2z_ac474127-0c18-4b02-bffb-9c141a545df2/nova-edpm-deployment-openstack-edpm-ipam/0.log" Oct 02 12:22:19 crc kubenswrapper[4783]: I1002 12:22:19.828903 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-h695m_2961f1ad-0d0f-46f7-87f5-ad4e2ef71dcc/nova-edpm-deployment-openstack-edpm-ipam/0.log" Oct 02 12:22:20 crc kubenswrapper[4783]: I1002 12:22:20.012817 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-pxxzd_ad8fdf40-da4c-42c2-a8cd-01675807c93c/nova-edpm-deployment-openstack-edpm-ipam/0.log" Oct 02 12:22:20 crc kubenswrapper[4783]: I1002 12:22:20.179578 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-r4zjr_3bb46b91-0791-4807-bcb8-20324d854a41/nova-edpm-deployment-openstack-edpm-ipam/0.log" Oct 02 12:22:20 crc kubenswrapper[4783]: I1002 12:22:20.579267 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-v8t7p_fa70f2b8-5bce-4ebe-a067-66d7dd57e787/nova-edpm-deployment-openstack-edpm-ipam/0.log" Oct 02 12:22:20 crc kubenswrapper[4783]: I1002 12:22:20.769594 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-edpm-deployment-openstack-edpm-ipam-xp2l6_7f8d39f6-ae36-44ac-87ce-da7542ace825/nova-edpm-deployment-openstack-edpm-ipam/0.log" Oct 02 12:22:20 crc kubenswrapper[4783]: I1002 12:22:20.974765 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_ec427546-ed98-4091-93e6-563f539a9d69/nova-metadata-log/0.log" Oct 02 12:22:21 crc kubenswrapper[4783]: I1002 12:22:21.452288 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-scheduler-0_7db565dd-9936-429f-8842-9c484199e519/nova-scheduler-scheduler/0.log" Oct 02 12:22:21 crc kubenswrapper[4783]: I1002 12:22:21.786265 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_8c174112-c7e3-43b2-b794-a085b9565b90/mysql-bootstrap/0.log" Oct 02 12:22:22 crc kubenswrapper[4783]: I1002 12:22:22.002240 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_8c174112-c7e3-43b2-b794-a085b9565b90/galera/0.log" Oct 02 12:22:22 crc kubenswrapper[4783]: I1002 12:22:22.050888 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-cell1-galera-0_8c174112-c7e3-43b2-b794-a085b9565b90/mysql-bootstrap/0.log" Oct 02 12:22:22 crc kubenswrapper[4783]: I1002 12:22:22.373314 4783 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_openstack-galera-0_b969c99b-7cd2-413c-b9ea-4b0fc855fb66/mysql-bootstrap/0.log" Oct 02 12:22:22 crc kubenswrapper[4783]: I1002 12:22:22.766792 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_b969c99b-7cd2-413c-b9ea-4b0fc855fb66/galera/0.log" Oct 02 12:22:22 crc kubenswrapper[4783]: I1002 12:22:22.784678 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstack-galera-0_b969c99b-7cd2-413c-b9ea-4b0fc855fb66/mysql-bootstrap/0.log" Oct 02 12:22:23 crc kubenswrapper[4783]: I1002 12:22:23.050561 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_openstackclient_14cd84ac-799d-4243-ba1c-3d4ff4e110cc/openstackclient/0.log" Oct 02 12:22:23 crc kubenswrapper[4783]: I1002 12:22:23.215681 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_nova-metadata-0_ec427546-ed98-4091-93e6-563f539a9d69/nova-metadata-metadata/0.log" Oct 02 12:22:23 crc kubenswrapper[4783]: I1002 12:22:23.410923 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-k7v6f_c9fa791a-e505-43ab-a361-897b21f24f89/ovn-controller/0.log" Oct 02 12:22:23 crc kubenswrapper[4783]: I1002 12:22:23.648280 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-pc22j_8040f1be-17e4-4424-942a-b7c2bcb55b0b/openstack-network-exporter/0.log" Oct 02 12:22:23 crc kubenswrapper[4783]: I1002 12:22:23.808247 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-p5v7m_246fea45-d93e-4975-bc2e-818ec7eafa99/ovsdb-server-init/0.log" Oct 02 12:22:24 crc kubenswrapper[4783]: I1002 12:22:24.121905 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-p5v7m_246fea45-d93e-4975-bc2e-818ec7eafa99/ovs-vswitchd/0.log" Oct 02 12:22:24 crc kubenswrapper[4783]: I1002 12:22:24.179903 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-p5v7m_246fea45-d93e-4975-bc2e-818ec7eafa99/ovsdb-server-init/0.log" Oct 02 12:22:24 crc kubenswrapper[4783]: I1002 12:22:24.235839 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-p5v7m_246fea45-d93e-4975-bc2e-818ec7eafa99/ovsdb-server/0.log" Oct 02 12:22:24 crc kubenswrapper[4783]: I1002 12:22:24.601978 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-edpm-deployment-openstack-edpm-ipam-8npzc_caba4df9-1229-497f-8dcd-07434b0c9664/ovn-edpm-deployment-openstack-edpm-ipam/0.log" Oct 02 12:22:24 crc kubenswrapper[4783]: I1002 12:22:24.779327 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_58303536-7e70-4db0-a6ac-0bcf69fb7aa6/openstack-network-exporter/0.log" Oct 02 12:22:24 crc kubenswrapper[4783]: I1002 12:22:24.807794 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_58303536-7e70-4db0-a6ac-0bcf69fb7aa6/ovn-northd/0.log" Oct 02 12:22:25 crc kubenswrapper[4783]: I1002 12:22:25.072977 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_627209ce-c546-42e5-b35e-ab8abd950ef8/openstack-network-exporter/0.log" Oct 02 12:22:25 crc kubenswrapper[4783]: I1002 12:22:25.141733 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_627209ce-c546-42e5-b35e-ab8abd950ef8/ovsdbserver-nb/0.log" Oct 02 12:22:25 crc kubenswrapper[4783]: I1002 12:22:25.400661 4783 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_ovsdbserver-sb-0_f0a8aca8-fc00-4c0a-82b5-63fc50672f72/openstack-network-exporter/0.log" Oct 02 12:22:25 crc kubenswrapper[4783]: I1002 12:22:25.405035 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_f0a8aca8-fc00-4c0a-82b5-63fc50672f72/ovsdbserver-sb/0.log" Oct 02 12:22:25 crc kubenswrapper[4783]: I1002 12:22:25.809289 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-6dd865cb48-8hqrj_83c170e0-d600-413c-b4a9-dbe838f2bcd2/placement-api/0.log" Oct 02 12:22:25 crc kubenswrapper[4783]: I1002 12:22:25.820644 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_placement-6dd865cb48-8hqrj_83c170e0-d600-413c-b4a9-dbe838f2bcd2/placement-log/0.log" Oct 02 12:22:25 crc kubenswrapper[4783]: I1002 12:22:25.993959 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_360fdfae-913f-4be9-985b-26101d3dfb3b/setup-container/0.log" Oct 02 12:22:26 crc kubenswrapper[4783]: I1002 12:22:26.263366 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_360fdfae-913f-4be9-985b-26101d3dfb3b/setup-container/0.log" Oct 02 12:22:26 crc kubenswrapper[4783]: I1002 12:22:26.294841 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-cell1-server-0_360fdfae-913f-4be9-985b-26101d3dfb3b/rabbitmq/0.log" Oct 02 12:22:26 crc kubenswrapper[4783]: I1002 12:22:26.585904 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6/setup-container/0.log" Oct 02 12:22:26 crc kubenswrapper[4783]: I1002 12:22:26.847077 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6/setup-container/0.log" Oct 02 12:22:26 crc kubenswrapper[4783]: I1002 12:22:26.925243 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_rabbitmq-server-0_7a1614d3-ab46-4b39-97f5-82c9cbcdf1c6/rabbitmq/0.log" Oct 02 12:22:27 crc kubenswrapper[4783]: I1002 12:22:27.188963 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_reboot-os-edpm-deployment-openstack-edpm-ipam-frzzd_b96ddc9e-8288-4843-86c3-85caab8d78af/reboot-os-edpm-deployment-openstack-edpm-ipam/0.log" Oct 02 12:22:27 crc kubenswrapper[4783]: I1002 12:22:27.366686 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_redhat-edpm-deployment-openstack-edpm-ipam-qsqrg_55b494d4-5d17-4b36-bff6-75cad54634ac/redhat-edpm-deployment-openstack-edpm-ipam/0.log" Oct 02 12:22:27 crc kubenswrapper[4783]: I1002 12:22:27.510312 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-jpz4q"] Oct 02 12:22:27 crc kubenswrapper[4783]: I1002 12:22:27.513105 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-jpz4q" Oct 02 12:22:27 crc kubenswrapper[4783]: I1002 12:22:27.593141 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jpz4q"] Oct 02 12:22:27 crc kubenswrapper[4783]: I1002 12:22:27.610317 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aefd8a3c-6754-4ca9-ad38-ae9afef370c3-catalog-content\") pod \"community-operators-jpz4q\" (UID: \"aefd8a3c-6754-4ca9-ad38-ae9afef370c3\") " pod="openshift-marketplace/community-operators-jpz4q" Oct 02 12:22:27 crc kubenswrapper[4783]: I1002 12:22:27.610382 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aefd8a3c-6754-4ca9-ad38-ae9afef370c3-utilities\") pod \"community-operators-jpz4q\" (UID: \"aefd8a3c-6754-4ca9-ad38-ae9afef370c3\") " pod="openshift-marketplace/community-operators-jpz4q" Oct 02 12:22:27 crc kubenswrapper[4783]: I1002 12:22:27.610403 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mnj5k\" (UniqueName: \"kubernetes.io/projected/aefd8a3c-6754-4ca9-ad38-ae9afef370c3-kube-api-access-mnj5k\") pod \"community-operators-jpz4q\" (UID: \"aefd8a3c-6754-4ca9-ad38-ae9afef370c3\") " pod="openshift-marketplace/community-operators-jpz4q" Oct 02 12:22:27 crc kubenswrapper[4783]: I1002 12:22:27.720182 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aefd8a3c-6754-4ca9-ad38-ae9afef370c3-catalog-content\") pod \"community-operators-jpz4q\" (UID: \"aefd8a3c-6754-4ca9-ad38-ae9afef370c3\") " pod="openshift-marketplace/community-operators-jpz4q" Oct 02 12:22:27 crc kubenswrapper[4783]: I1002 12:22:27.721921 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aefd8a3c-6754-4ca9-ad38-ae9afef370c3-catalog-content\") pod \"community-operators-jpz4q\" (UID: \"aefd8a3c-6754-4ca9-ad38-ae9afef370c3\") " pod="openshift-marketplace/community-operators-jpz4q" Oct 02 12:22:27 crc kubenswrapper[4783]: I1002 12:22:27.722026 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aefd8a3c-6754-4ca9-ad38-ae9afef370c3-utilities\") pod \"community-operators-jpz4q\" (UID: \"aefd8a3c-6754-4ca9-ad38-ae9afef370c3\") " pod="openshift-marketplace/community-operators-jpz4q" Oct 02 12:22:27 crc kubenswrapper[4783]: I1002 12:22:27.722048 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mnj5k\" (UniqueName: \"kubernetes.io/projected/aefd8a3c-6754-4ca9-ad38-ae9afef370c3-kube-api-access-mnj5k\") pod \"community-operators-jpz4q\" (UID: \"aefd8a3c-6754-4ca9-ad38-ae9afef370c3\") " pod="openshift-marketplace/community-operators-jpz4q" Oct 02 12:22:27 crc kubenswrapper[4783]: I1002 12:22:27.722655 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aefd8a3c-6754-4ca9-ad38-ae9afef370c3-utilities\") pod \"community-operators-jpz4q\" (UID: \"aefd8a3c-6754-4ca9-ad38-ae9afef370c3\") " pod="openshift-marketplace/community-operators-jpz4q" Oct 02 12:22:27 crc kubenswrapper[4783]: I1002 12:22:27.744541 4783 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-mnj5k\" (UniqueName: \"kubernetes.io/projected/aefd8a3c-6754-4ca9-ad38-ae9afef370c3-kube-api-access-mnj5k\") pod \"community-operators-jpz4q\" (UID: \"aefd8a3c-6754-4ca9-ad38-ae9afef370c3\") " pod="openshift-marketplace/community-operators-jpz4q" Oct 02 12:22:27 crc kubenswrapper[4783]: I1002 12:22:27.762379 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_repo-setup-edpm-deployment-openstack-edpm-ipam-ph6fg_0880686c-c2ed-4b67-80e8-61a1d9aaa0de/repo-setup-edpm-deployment-openstack-edpm-ipam/0.log" Oct 02 12:22:27 crc kubenswrapper[4783]: I1002 12:22:27.841634 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jpz4q" Oct 02 12:22:28 crc kubenswrapper[4783]: I1002 12:22:28.245528 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_run-os-edpm-deployment-openstack-edpm-ipam-vqjwf_a82c04b0-2ff6-4514-b241-3c068b2a577d/run-os-edpm-deployment-openstack-edpm-ipam/0.log" Oct 02 12:22:28 crc kubenswrapper[4783]: I1002 12:22:28.289580 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-pvfql" podUID="67e435fd-09ce-4d8d-aca3-4733bae6d63a" containerName="registry-server" probeResult="failure" output=< Oct 02 12:22:28 crc kubenswrapper[4783]: timeout: failed to connect service ":50051" within 1s Oct 02 12:22:28 crc kubenswrapper[4783]: > Oct 02 12:22:28 crc kubenswrapper[4783]: I1002 12:22:28.658588 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-jpz4q"] Oct 02 12:22:28 crc kubenswrapper[4783]: I1002 12:22:28.738389 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ssh-known-hosts-edpm-deployment-bpw9h_1a6d6ab9-5cf9-42c3-bc7c-c3afd16f7ac8/ssh-known-hosts-edpm-deployment/0.log" Oct 02 12:22:29 crc kubenswrapper[4783]: I1002 12:22:29.355010 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-7ddcb47dc5-2vzlt_06ff3ffc-66e4-48b6-a386-0cc72c47f104/proxy-httpd/0.log" Oct 02 12:22:29 crc kubenswrapper[4783]: I1002 12:22:29.369778 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-proxy-7ddcb47dc5-2vzlt_06ff3ffc-66e4-48b6-a386-0cc72c47f104/proxy-server/0.log" Oct 02 12:22:29 crc kubenswrapper[4783]: I1002 12:22:29.436388 4783 generic.go:334] "Generic (PLEG): container finished" podID="aefd8a3c-6754-4ca9-ad38-ae9afef370c3" containerID="850e7baa4640fcabe7771fd8a8c27ecea3c46079164819b847e3768382cda231" exitCode=0 Oct 02 12:22:29 crc kubenswrapper[4783]: I1002 12:22:29.436456 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jpz4q" event={"ID":"aefd8a3c-6754-4ca9-ad38-ae9afef370c3","Type":"ContainerDied","Data":"850e7baa4640fcabe7771fd8a8c27ecea3c46079164819b847e3768382cda231"} Oct 02 12:22:29 crc kubenswrapper[4783]: I1002 12:22:29.436564 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jpz4q" event={"ID":"aefd8a3c-6754-4ca9-ad38-ae9afef370c3","Type":"ContainerStarted","Data":"02487a88ba39990a2f0226679607e9a37e157aa8447685911ddf4f76cc7c7870"} Oct 02 12:22:29 crc kubenswrapper[4783]: I1002 12:22:29.438871 4783 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 02 12:22:29 crc kubenswrapper[4783]: I1002 12:22:29.616829 4783 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_swift-ring-rebalance-4b4vd_0072de5e-507d-4efe-8f60-f48b9799fe72/swift-ring-rebalance/0.log" Oct 02 12:22:29 crc kubenswrapper[4783]: I1002 12:22:29.730771 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_5c831884-5efd-4048-b7df-d0edf1d51e89/account-auditor/0.log" Oct 02 12:22:29 crc kubenswrapper[4783]: I1002 12:22:29.858673 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_5c831884-5efd-4048-b7df-d0edf1d51e89/account-reaper/0.log" Oct 02 12:22:30 crc kubenswrapper[4783]: I1002 12:22:30.002645 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_5c831884-5efd-4048-b7df-d0edf1d51e89/account-replicator/0.log" Oct 02 12:22:30 crc kubenswrapper[4783]: I1002 12:22:30.003208 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_5c831884-5efd-4048-b7df-d0edf1d51e89/account-server/0.log" Oct 02 12:22:30 crc kubenswrapper[4783]: I1002 12:22:30.100087 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_5c831884-5efd-4048-b7df-d0edf1d51e89/container-auditor/0.log" Oct 02 12:22:30 crc kubenswrapper[4783]: I1002 12:22:30.302901 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_5c831884-5efd-4048-b7df-d0edf1d51e89/container-server/0.log" Oct 02 12:22:30 crc kubenswrapper[4783]: I1002 12:22:30.341395 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_5c831884-5efd-4048-b7df-d0edf1d51e89/container-updater/0.log" Oct 02 12:22:30 crc kubenswrapper[4783]: I1002 12:22:30.360018 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_5c831884-5efd-4048-b7df-d0edf1d51e89/container-replicator/0.log" Oct 02 12:22:30 crc kubenswrapper[4783]: I1002 12:22:30.454891 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jpz4q" event={"ID":"aefd8a3c-6754-4ca9-ad38-ae9afef370c3","Type":"ContainerStarted","Data":"4eb6444b756ad0475722873a5bd4c22b50a3255f7e1864a029a8d8db0bc227f3"} Oct 02 12:22:30 crc kubenswrapper[4783]: I1002 12:22:30.581629 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_5c831884-5efd-4048-b7df-d0edf1d51e89/object-auditor/0.log" Oct 02 12:22:30 crc kubenswrapper[4783]: I1002 12:22:30.634687 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_5c831884-5efd-4048-b7df-d0edf1d51e89/object-expirer/0.log" Oct 02 12:22:30 crc kubenswrapper[4783]: I1002 12:22:30.665239 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_5c831884-5efd-4048-b7df-d0edf1d51e89/object-replicator/0.log" Oct 02 12:22:30 crc kubenswrapper[4783]: I1002 12:22:30.834892 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_5c831884-5efd-4048-b7df-d0edf1d51e89/object-server/0.log" Oct 02 12:22:30 crc kubenswrapper[4783]: I1002 12:22:30.922568 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_5c831884-5efd-4048-b7df-d0edf1d51e89/object-updater/0.log" Oct 02 12:22:30 crc kubenswrapper[4783]: I1002 12:22:30.954255 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_swift-storage-0_5c831884-5efd-4048-b7df-d0edf1d51e89/rsync/0.log" Oct 02 12:22:31 crc kubenswrapper[4783]: I1002 12:22:31.096331 4783 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack_swift-storage-0_5c831884-5efd-4048-b7df-d0edf1d51e89/swift-recon-cron/0.log" Oct 02 12:22:31 crc kubenswrapper[4783]: I1002 12:22:31.311694 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_validate-network-edpm-deployment-openstack-edpm-ipam-bmfl5_1b27c20b-97ac-477e-99c6-075d9f56c078/validate-network-edpm-deployment-openstack-edpm-ipam/0.log" Oct 02 12:22:33 crc kubenswrapper[4783]: I1002 12:22:33.045621 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_memcached-0_f7b5af20-9931-4086-a593-8c0090ce8c12/memcached/0.log" Oct 02 12:22:33 crc kubenswrapper[4783]: I1002 12:22:33.493630 4783 generic.go:334] "Generic (PLEG): container finished" podID="aefd8a3c-6754-4ca9-ad38-ae9afef370c3" containerID="4eb6444b756ad0475722873a5bd4c22b50a3255f7e1864a029a8d8db0bc227f3" exitCode=0 Oct 02 12:22:33 crc kubenswrapper[4783]: I1002 12:22:33.493712 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jpz4q" event={"ID":"aefd8a3c-6754-4ca9-ad38-ae9afef370c3","Type":"ContainerDied","Data":"4eb6444b756ad0475722873a5bd4c22b50a3255f7e1864a029a8d8db0bc227f3"} Oct 02 12:22:34 crc kubenswrapper[4783]: I1002 12:22:34.504127 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jpz4q" event={"ID":"aefd8a3c-6754-4ca9-ad38-ae9afef370c3","Type":"ContainerStarted","Data":"57ec7aa4988b03f27919dff3368f85fb20dc685dc0189a737410662d79a828d3"} Oct 02 12:22:34 crc kubenswrapper[4783]: I1002 12:22:34.531579 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-jpz4q" podStartSLOduration=2.8622342 podStartE2EDuration="7.531561009s" podCreationTimestamp="2025-10-02 12:22:27 +0000 UTC" firstStartedPulling="2025-10-02 12:22:29.438633358 +0000 UTC m=+5382.754827619" lastFinishedPulling="2025-10-02 12:22:34.107960167 +0000 UTC m=+5387.424154428" observedRunningTime="2025-10-02 12:22:34.52422867 +0000 UTC m=+5387.840422931" watchObservedRunningTime="2025-10-02 12:22:34.531561009 +0000 UTC m=+5387.847755270" Oct 02 12:22:37 crc kubenswrapper[4783]: I1002 12:22:37.254187 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-pvfql" Oct 02 12:22:37 crc kubenswrapper[4783]: I1002 12:22:37.312338 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-pvfql" Oct 02 12:22:37 crc kubenswrapper[4783]: I1002 12:22:37.493766 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-pvfql"] Oct 02 12:22:37 crc kubenswrapper[4783]: I1002 12:22:37.842609 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-jpz4q" Oct 02 12:22:37 crc kubenswrapper[4783]: I1002 12:22:37.843226 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-jpz4q" Oct 02 12:22:38 crc kubenswrapper[4783]: I1002 12:22:38.539246 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-pvfql" podUID="67e435fd-09ce-4d8d-aca3-4733bae6d63a" containerName="registry-server" containerID="cri-o://ef01e589680ee02cfda0cff8726d49c15b26af97226730227f469a0ca7ff162d" gracePeriod=2 Oct 02 12:22:38 crc kubenswrapper[4783]: I1002 12:22:38.915088 4783 prober.go:107] "Probe failed" probeType="Startup" 
pod="openshift-marketplace/community-operators-jpz4q" podUID="aefd8a3c-6754-4ca9-ad38-ae9afef370c3" containerName="registry-server" probeResult="failure" output=< Oct 02 12:22:38 crc kubenswrapper[4783]: timeout: failed to connect service ":50051" within 1s Oct 02 12:22:38 crc kubenswrapper[4783]: > Oct 02 12:22:39 crc kubenswrapper[4783]: I1002 12:22:39.069049 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pvfql" Oct 02 12:22:39 crc kubenswrapper[4783]: I1002 12:22:39.170494 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/67e435fd-09ce-4d8d-aca3-4733bae6d63a-utilities\") pod \"67e435fd-09ce-4d8d-aca3-4733bae6d63a\" (UID: \"67e435fd-09ce-4d8d-aca3-4733bae6d63a\") " Oct 02 12:22:39 crc kubenswrapper[4783]: I1002 12:22:39.170670 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/67e435fd-09ce-4d8d-aca3-4733bae6d63a-catalog-content\") pod \"67e435fd-09ce-4d8d-aca3-4733bae6d63a\" (UID: \"67e435fd-09ce-4d8d-aca3-4733bae6d63a\") " Oct 02 12:22:39 crc kubenswrapper[4783]: I1002 12:22:39.170697 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wgnls\" (UniqueName: \"kubernetes.io/projected/67e435fd-09ce-4d8d-aca3-4733bae6d63a-kube-api-access-wgnls\") pod \"67e435fd-09ce-4d8d-aca3-4733bae6d63a\" (UID: \"67e435fd-09ce-4d8d-aca3-4733bae6d63a\") " Oct 02 12:22:39 crc kubenswrapper[4783]: I1002 12:22:39.171909 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/67e435fd-09ce-4d8d-aca3-4733bae6d63a-utilities" (OuterVolumeSpecName: "utilities") pod "67e435fd-09ce-4d8d-aca3-4733bae6d63a" (UID: "67e435fd-09ce-4d8d-aca3-4733bae6d63a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 12:22:39 crc kubenswrapper[4783]: I1002 12:22:39.185030 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/67e435fd-09ce-4d8d-aca3-4733bae6d63a-kube-api-access-wgnls" (OuterVolumeSpecName: "kube-api-access-wgnls") pod "67e435fd-09ce-4d8d-aca3-4733bae6d63a" (UID: "67e435fd-09ce-4d8d-aca3-4733bae6d63a"). InnerVolumeSpecName "kube-api-access-wgnls". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 12:22:39 crc kubenswrapper[4783]: I1002 12:22:39.266861 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/67e435fd-09ce-4d8d-aca3-4733bae6d63a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "67e435fd-09ce-4d8d-aca3-4733bae6d63a" (UID: "67e435fd-09ce-4d8d-aca3-4733bae6d63a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 12:22:39 crc kubenswrapper[4783]: I1002 12:22:39.272347 4783 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/67e435fd-09ce-4d8d-aca3-4733bae6d63a-utilities\") on node \"crc\" DevicePath \"\"" Oct 02 12:22:39 crc kubenswrapper[4783]: I1002 12:22:39.272384 4783 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/67e435fd-09ce-4d8d-aca3-4733bae6d63a-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 02 12:22:39 crc kubenswrapper[4783]: I1002 12:22:39.272395 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wgnls\" (UniqueName: \"kubernetes.io/projected/67e435fd-09ce-4d8d-aca3-4733bae6d63a-kube-api-access-wgnls\") on node \"crc\" DevicePath \"\"" Oct 02 12:22:39 crc kubenswrapper[4783]: I1002 12:22:39.553530 4783 generic.go:334] "Generic (PLEG): container finished" podID="67e435fd-09ce-4d8d-aca3-4733bae6d63a" containerID="ef01e589680ee02cfda0cff8726d49c15b26af97226730227f469a0ca7ff162d" exitCode=0 Oct 02 12:22:39 crc kubenswrapper[4783]: I1002 12:22:39.553679 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pvfql" Oct 02 12:22:39 crc kubenswrapper[4783]: I1002 12:22:39.570675 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pvfql" event={"ID":"67e435fd-09ce-4d8d-aca3-4733bae6d63a","Type":"ContainerDied","Data":"ef01e589680ee02cfda0cff8726d49c15b26af97226730227f469a0ca7ff162d"} Oct 02 12:22:39 crc kubenswrapper[4783]: I1002 12:22:39.570717 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pvfql" event={"ID":"67e435fd-09ce-4d8d-aca3-4733bae6d63a","Type":"ContainerDied","Data":"7b0152f6b44813f7a2c1d8743819e539a9d5e74590a6440d200efccde442eb28"} Oct 02 12:22:39 crc kubenswrapper[4783]: I1002 12:22:39.570736 4783 scope.go:117] "RemoveContainer" containerID="ef01e589680ee02cfda0cff8726d49c15b26af97226730227f469a0ca7ff162d" Oct 02 12:22:39 crc kubenswrapper[4783]: I1002 12:22:39.615554 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-pvfql"] Oct 02 12:22:39 crc kubenswrapper[4783]: I1002 12:22:39.625871 4783 scope.go:117] "RemoveContainer" containerID="6cb3b71c83e709236561cbfcd1fa0996a572385af835f1ce8aaaebb718c59f83" Oct 02 12:22:39 crc kubenswrapper[4783]: I1002 12:22:39.633825 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-pvfql"] Oct 02 12:22:39 crc kubenswrapper[4783]: I1002 12:22:39.654342 4783 scope.go:117] "RemoveContainer" containerID="684deda65c7405d4a52df89210dcad85abdbabc8891da5fce424be6c0657ee07" Oct 02 12:22:39 crc kubenswrapper[4783]: I1002 12:22:39.708722 4783 scope.go:117] "RemoveContainer" containerID="ef01e589680ee02cfda0cff8726d49c15b26af97226730227f469a0ca7ff162d" Oct 02 12:22:39 crc kubenswrapper[4783]: E1002 12:22:39.711023 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ef01e589680ee02cfda0cff8726d49c15b26af97226730227f469a0ca7ff162d\": container with ID starting with ef01e589680ee02cfda0cff8726d49c15b26af97226730227f469a0ca7ff162d not found: ID does not exist" containerID="ef01e589680ee02cfda0cff8726d49c15b26af97226730227f469a0ca7ff162d" Oct 02 12:22:39 crc kubenswrapper[4783]: I1002 12:22:39.711074 4783 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ef01e589680ee02cfda0cff8726d49c15b26af97226730227f469a0ca7ff162d"} err="failed to get container status \"ef01e589680ee02cfda0cff8726d49c15b26af97226730227f469a0ca7ff162d\": rpc error: code = NotFound desc = could not find container \"ef01e589680ee02cfda0cff8726d49c15b26af97226730227f469a0ca7ff162d\": container with ID starting with ef01e589680ee02cfda0cff8726d49c15b26af97226730227f469a0ca7ff162d not found: ID does not exist" Oct 02 12:22:39 crc kubenswrapper[4783]: I1002 12:22:39.711104 4783 scope.go:117] "RemoveContainer" containerID="6cb3b71c83e709236561cbfcd1fa0996a572385af835f1ce8aaaebb718c59f83" Oct 02 12:22:39 crc kubenswrapper[4783]: E1002 12:22:39.711446 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6cb3b71c83e709236561cbfcd1fa0996a572385af835f1ce8aaaebb718c59f83\": container with ID starting with 6cb3b71c83e709236561cbfcd1fa0996a572385af835f1ce8aaaebb718c59f83 not found: ID does not exist" containerID="6cb3b71c83e709236561cbfcd1fa0996a572385af835f1ce8aaaebb718c59f83" Oct 02 12:22:39 crc kubenswrapper[4783]: I1002 12:22:39.711464 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6cb3b71c83e709236561cbfcd1fa0996a572385af835f1ce8aaaebb718c59f83"} err="failed to get container status \"6cb3b71c83e709236561cbfcd1fa0996a572385af835f1ce8aaaebb718c59f83\": rpc error: code = NotFound desc = could not find container \"6cb3b71c83e709236561cbfcd1fa0996a572385af835f1ce8aaaebb718c59f83\": container with ID starting with 6cb3b71c83e709236561cbfcd1fa0996a572385af835f1ce8aaaebb718c59f83 not found: ID does not exist" Oct 02 12:22:39 crc kubenswrapper[4783]: I1002 12:22:39.711476 4783 scope.go:117] "RemoveContainer" containerID="684deda65c7405d4a52df89210dcad85abdbabc8891da5fce424be6c0657ee07" Oct 02 12:22:39 crc kubenswrapper[4783]: E1002 12:22:39.711696 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"684deda65c7405d4a52df89210dcad85abdbabc8891da5fce424be6c0657ee07\": container with ID starting with 684deda65c7405d4a52df89210dcad85abdbabc8891da5fce424be6c0657ee07 not found: ID does not exist" containerID="684deda65c7405d4a52df89210dcad85abdbabc8891da5fce424be6c0657ee07" Oct 02 12:22:39 crc kubenswrapper[4783]: I1002 12:22:39.711713 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"684deda65c7405d4a52df89210dcad85abdbabc8891da5fce424be6c0657ee07"} err="failed to get container status \"684deda65c7405d4a52df89210dcad85abdbabc8891da5fce424be6c0657ee07\": rpc error: code = NotFound desc = could not find container \"684deda65c7405d4a52df89210dcad85abdbabc8891da5fce424be6c0657ee07\": container with ID starting with 684deda65c7405d4a52df89210dcad85abdbabc8891da5fce424be6c0657ee07 not found: ID does not exist" Oct 02 12:22:41 crc kubenswrapper[4783]: I1002 12:22:41.556173 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="67e435fd-09ce-4d8d-aca3-4733bae6d63a" path="/var/lib/kubelet/pods/67e435fd-09ce-4d8d-aca3-4733bae6d63a/volumes" Oct 02 12:22:48 crc kubenswrapper[4783]: I1002 12:22:48.886809 4783 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-jpz4q" podUID="aefd8a3c-6754-4ca9-ad38-ae9afef370c3" containerName="registry-server" probeResult="failure" output=< Oct 02 12:22:48 crc 
kubenswrapper[4783]: timeout: failed to connect service ":50051" within 1s Oct 02 12:22:48 crc kubenswrapper[4783]: > Oct 02 12:22:57 crc kubenswrapper[4783]: I1002 12:22:57.896630 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-jpz4q" Oct 02 12:22:57 crc kubenswrapper[4783]: I1002 12:22:57.961421 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-jpz4q" Oct 02 12:22:58 crc kubenswrapper[4783]: I1002 12:22:58.803573 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jpz4q"] Oct 02 12:22:59 crc kubenswrapper[4783]: I1002 12:22:59.724559 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-jpz4q" podUID="aefd8a3c-6754-4ca9-ad38-ae9afef370c3" containerName="registry-server" containerID="cri-o://57ec7aa4988b03f27919dff3368f85fb20dc685dc0189a737410662d79a828d3" gracePeriod=2 Oct 02 12:23:00 crc kubenswrapper[4783]: I1002 12:23:00.309962 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-jpz4q" Oct 02 12:23:00 crc kubenswrapper[4783]: I1002 12:23:00.474670 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aefd8a3c-6754-4ca9-ad38-ae9afef370c3-utilities\") pod \"aefd8a3c-6754-4ca9-ad38-ae9afef370c3\" (UID: \"aefd8a3c-6754-4ca9-ad38-ae9afef370c3\") " Oct 02 12:23:00 crc kubenswrapper[4783]: I1002 12:23:00.474806 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aefd8a3c-6754-4ca9-ad38-ae9afef370c3-catalog-content\") pod \"aefd8a3c-6754-4ca9-ad38-ae9afef370c3\" (UID: \"aefd8a3c-6754-4ca9-ad38-ae9afef370c3\") " Oct 02 12:23:00 crc kubenswrapper[4783]: I1002 12:23:00.474851 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnj5k\" (UniqueName: \"kubernetes.io/projected/aefd8a3c-6754-4ca9-ad38-ae9afef370c3-kube-api-access-mnj5k\") pod \"aefd8a3c-6754-4ca9-ad38-ae9afef370c3\" (UID: \"aefd8a3c-6754-4ca9-ad38-ae9afef370c3\") " Oct 02 12:23:00 crc kubenswrapper[4783]: I1002 12:23:00.477318 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aefd8a3c-6754-4ca9-ad38-ae9afef370c3-utilities" (OuterVolumeSpecName: "utilities") pod "aefd8a3c-6754-4ca9-ad38-ae9afef370c3" (UID: "aefd8a3c-6754-4ca9-ad38-ae9afef370c3"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 12:23:00 crc kubenswrapper[4783]: I1002 12:23:00.496479 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/aefd8a3c-6754-4ca9-ad38-ae9afef370c3-kube-api-access-mnj5k" (OuterVolumeSpecName: "kube-api-access-mnj5k") pod "aefd8a3c-6754-4ca9-ad38-ae9afef370c3" (UID: "aefd8a3c-6754-4ca9-ad38-ae9afef370c3"). InnerVolumeSpecName "kube-api-access-mnj5k". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 12:23:00 crc kubenswrapper[4783]: I1002 12:23:00.519978 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/aefd8a3c-6754-4ca9-ad38-ae9afef370c3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "aefd8a3c-6754-4ca9-ad38-ae9afef370c3" (UID: "aefd8a3c-6754-4ca9-ad38-ae9afef370c3"). 
InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 12:23:00 crc kubenswrapper[4783]: I1002 12:23:00.577282 4783 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/aefd8a3c-6754-4ca9-ad38-ae9afef370c3-utilities\") on node \"crc\" DevicePath \"\"" Oct 02 12:23:00 crc kubenswrapper[4783]: I1002 12:23:00.577316 4783 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/aefd8a3c-6754-4ca9-ad38-ae9afef370c3-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 02 12:23:00 crc kubenswrapper[4783]: I1002 12:23:00.577327 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnj5k\" (UniqueName: \"kubernetes.io/projected/aefd8a3c-6754-4ca9-ad38-ae9afef370c3-kube-api-access-mnj5k\") on node \"crc\" DevicePath \"\"" Oct 02 12:23:00 crc kubenswrapper[4783]: I1002 12:23:00.735275 4783 generic.go:334] "Generic (PLEG): container finished" podID="aefd8a3c-6754-4ca9-ad38-ae9afef370c3" containerID="57ec7aa4988b03f27919dff3368f85fb20dc685dc0189a737410662d79a828d3" exitCode=0 Oct 02 12:23:00 crc kubenswrapper[4783]: I1002 12:23:00.735320 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jpz4q" event={"ID":"aefd8a3c-6754-4ca9-ad38-ae9afef370c3","Type":"ContainerDied","Data":"57ec7aa4988b03f27919dff3368f85fb20dc685dc0189a737410662d79a828d3"} Oct 02 12:23:00 crc kubenswrapper[4783]: I1002 12:23:00.735351 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-jpz4q" event={"ID":"aefd8a3c-6754-4ca9-ad38-ae9afef370c3","Type":"ContainerDied","Data":"02487a88ba39990a2f0226679607e9a37e157aa8447685911ddf4f76cc7c7870"} Oct 02 12:23:00 crc kubenswrapper[4783]: I1002 12:23:00.735376 4783 scope.go:117] "RemoveContainer" containerID="57ec7aa4988b03f27919dff3368f85fb20dc685dc0189a737410662d79a828d3" Oct 02 12:23:00 crc kubenswrapper[4783]: I1002 12:23:00.735544 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-jpz4q" Oct 02 12:23:00 crc kubenswrapper[4783]: I1002 12:23:00.765756 4783 scope.go:117] "RemoveContainer" containerID="4eb6444b756ad0475722873a5bd4c22b50a3255f7e1864a029a8d8db0bc227f3" Oct 02 12:23:00 crc kubenswrapper[4783]: I1002 12:23:00.787818 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-jpz4q"] Oct 02 12:23:00 crc kubenswrapper[4783]: I1002 12:23:00.810969 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-jpz4q"] Oct 02 12:23:00 crc kubenswrapper[4783]: I1002 12:23:00.836708 4783 scope.go:117] "RemoveContainer" containerID="850e7baa4640fcabe7771fd8a8c27ecea3c46079164819b847e3768382cda231" Oct 02 12:23:00 crc kubenswrapper[4783]: I1002 12:23:00.856871 4783 scope.go:117] "RemoveContainer" containerID="57ec7aa4988b03f27919dff3368f85fb20dc685dc0189a737410662d79a828d3" Oct 02 12:23:00 crc kubenswrapper[4783]: E1002 12:23:00.857648 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"57ec7aa4988b03f27919dff3368f85fb20dc685dc0189a737410662d79a828d3\": container with ID starting with 57ec7aa4988b03f27919dff3368f85fb20dc685dc0189a737410662d79a828d3 not found: ID does not exist" containerID="57ec7aa4988b03f27919dff3368f85fb20dc685dc0189a737410662d79a828d3" Oct 02 12:23:00 crc kubenswrapper[4783]: I1002 12:23:00.857686 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"57ec7aa4988b03f27919dff3368f85fb20dc685dc0189a737410662d79a828d3"} err="failed to get container status \"57ec7aa4988b03f27919dff3368f85fb20dc685dc0189a737410662d79a828d3\": rpc error: code = NotFound desc = could not find container \"57ec7aa4988b03f27919dff3368f85fb20dc685dc0189a737410662d79a828d3\": container with ID starting with 57ec7aa4988b03f27919dff3368f85fb20dc685dc0189a737410662d79a828d3 not found: ID does not exist" Oct 02 12:23:00 crc kubenswrapper[4783]: I1002 12:23:00.857724 4783 scope.go:117] "RemoveContainer" containerID="4eb6444b756ad0475722873a5bd4c22b50a3255f7e1864a029a8d8db0bc227f3" Oct 02 12:23:00 crc kubenswrapper[4783]: E1002 12:23:00.858044 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4eb6444b756ad0475722873a5bd4c22b50a3255f7e1864a029a8d8db0bc227f3\": container with ID starting with 4eb6444b756ad0475722873a5bd4c22b50a3255f7e1864a029a8d8db0bc227f3 not found: ID does not exist" containerID="4eb6444b756ad0475722873a5bd4c22b50a3255f7e1864a029a8d8db0bc227f3" Oct 02 12:23:00 crc kubenswrapper[4783]: I1002 12:23:00.858068 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4eb6444b756ad0475722873a5bd4c22b50a3255f7e1864a029a8d8db0bc227f3"} err="failed to get container status \"4eb6444b756ad0475722873a5bd4c22b50a3255f7e1864a029a8d8db0bc227f3\": rpc error: code = NotFound desc = could not find container \"4eb6444b756ad0475722873a5bd4c22b50a3255f7e1864a029a8d8db0bc227f3\": container with ID starting with 4eb6444b756ad0475722873a5bd4c22b50a3255f7e1864a029a8d8db0bc227f3 not found: ID does not exist" Oct 02 12:23:00 crc kubenswrapper[4783]: I1002 12:23:00.858084 4783 scope.go:117] "RemoveContainer" containerID="850e7baa4640fcabe7771fd8a8c27ecea3c46079164819b847e3768382cda231" Oct 02 12:23:00 crc kubenswrapper[4783]: E1002 12:23:00.858259 4783 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"850e7baa4640fcabe7771fd8a8c27ecea3c46079164819b847e3768382cda231\": container with ID starting with 850e7baa4640fcabe7771fd8a8c27ecea3c46079164819b847e3768382cda231 not found: ID does not exist" containerID="850e7baa4640fcabe7771fd8a8c27ecea3c46079164819b847e3768382cda231" Oct 02 12:23:00 crc kubenswrapper[4783]: I1002 12:23:00.858279 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"850e7baa4640fcabe7771fd8a8c27ecea3c46079164819b847e3768382cda231"} err="failed to get container status \"850e7baa4640fcabe7771fd8a8c27ecea3c46079164819b847e3768382cda231\": rpc error: code = NotFound desc = could not find container \"850e7baa4640fcabe7771fd8a8c27ecea3c46079164819b847e3768382cda231\": container with ID starting with 850e7baa4640fcabe7771fd8a8c27ecea3c46079164819b847e3768382cda231 not found: ID does not exist" Oct 02 12:23:01 crc kubenswrapper[4783]: I1002 12:23:01.557201 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="aefd8a3c-6754-4ca9-ad38-ae9afef370c3" path="/var/lib/kubelet/pods/aefd8a3c-6754-4ca9-ad38-ae9afef370c3/volumes" Oct 02 12:23:03 crc kubenswrapper[4783]: I1002 12:23:03.759625 4783 generic.go:334] "Generic (PLEG): container finished" podID="02f9b876-95e8-49ed-89ca-b51c262a7cdc" containerID="1c453a9b6f515cc1a7532dce0ec4a7ea7fe3a5db7ab58deabdf5cfbb6f485707" exitCode=0 Oct 02 12:23:03 crc kubenswrapper[4783]: I1002 12:23:03.760146 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-52dnv/crc-debug-nv4h7" event={"ID":"02f9b876-95e8-49ed-89ca-b51c262a7cdc","Type":"ContainerDied","Data":"1c453a9b6f515cc1a7532dce0ec4a7ea7fe3a5db7ab58deabdf5cfbb6f485707"} Oct 02 12:23:04 crc kubenswrapper[4783]: I1002 12:23:04.910794 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-52dnv/crc-debug-nv4h7" Oct 02 12:23:04 crc kubenswrapper[4783]: I1002 12:23:04.942749 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-52dnv/crc-debug-nv4h7"] Oct 02 12:23:04 crc kubenswrapper[4783]: I1002 12:23:04.951174 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-52dnv/crc-debug-nv4h7"] Oct 02 12:23:05 crc kubenswrapper[4783]: I1002 12:23:05.068474 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/02f9b876-95e8-49ed-89ca-b51c262a7cdc-host\") pod \"02f9b876-95e8-49ed-89ca-b51c262a7cdc\" (UID: \"02f9b876-95e8-49ed-89ca-b51c262a7cdc\") " Oct 02 12:23:05 crc kubenswrapper[4783]: I1002 12:23:05.068625 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/02f9b876-95e8-49ed-89ca-b51c262a7cdc-host" (OuterVolumeSpecName: "host") pod "02f9b876-95e8-49ed-89ca-b51c262a7cdc" (UID: "02f9b876-95e8-49ed-89ca-b51c262a7cdc"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 02 12:23:05 crc kubenswrapper[4783]: I1002 12:23:05.068987 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p5brc\" (UniqueName: \"kubernetes.io/projected/02f9b876-95e8-49ed-89ca-b51c262a7cdc-kube-api-access-p5brc\") pod \"02f9b876-95e8-49ed-89ca-b51c262a7cdc\" (UID: \"02f9b876-95e8-49ed-89ca-b51c262a7cdc\") " Oct 02 12:23:05 crc kubenswrapper[4783]: I1002 12:23:05.069458 4783 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/02f9b876-95e8-49ed-89ca-b51c262a7cdc-host\") on node \"crc\" DevicePath \"\"" Oct 02 12:23:05 crc kubenswrapper[4783]: I1002 12:23:05.075683 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/02f9b876-95e8-49ed-89ca-b51c262a7cdc-kube-api-access-p5brc" (OuterVolumeSpecName: "kube-api-access-p5brc") pod "02f9b876-95e8-49ed-89ca-b51c262a7cdc" (UID: "02f9b876-95e8-49ed-89ca-b51c262a7cdc"). InnerVolumeSpecName "kube-api-access-p5brc". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 12:23:05 crc kubenswrapper[4783]: I1002 12:23:05.171545 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p5brc\" (UniqueName: \"kubernetes.io/projected/02f9b876-95e8-49ed-89ca-b51c262a7cdc-kube-api-access-p5brc\") on node \"crc\" DevicePath \"\"" Oct 02 12:23:05 crc kubenswrapper[4783]: I1002 12:23:05.555759 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="02f9b876-95e8-49ed-89ca-b51c262a7cdc" path="/var/lib/kubelet/pods/02f9b876-95e8-49ed-89ca-b51c262a7cdc/volumes" Oct 02 12:23:05 crc kubenswrapper[4783]: I1002 12:23:05.780316 4783 scope.go:117] "RemoveContainer" containerID="1c453a9b6f515cc1a7532dce0ec4a7ea7fe3a5db7ab58deabdf5cfbb6f485707" Oct 02 12:23:05 crc kubenswrapper[4783]: I1002 12:23:05.780384 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-52dnv/crc-debug-nv4h7" Oct 02 12:23:06 crc kubenswrapper[4783]: I1002 12:23:06.095095 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-52dnv/crc-debug-8tvgl"] Oct 02 12:23:06 crc kubenswrapper[4783]: E1002 12:23:06.095538 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aefd8a3c-6754-4ca9-ad38-ae9afef370c3" containerName="extract-utilities" Oct 02 12:23:06 crc kubenswrapper[4783]: I1002 12:23:06.095552 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="aefd8a3c-6754-4ca9-ad38-ae9afef370c3" containerName="extract-utilities" Oct 02 12:23:06 crc kubenswrapper[4783]: E1002 12:23:06.095570 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67e435fd-09ce-4d8d-aca3-4733bae6d63a" containerName="extract-utilities" Oct 02 12:23:06 crc kubenswrapper[4783]: I1002 12:23:06.095577 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="67e435fd-09ce-4d8d-aca3-4733bae6d63a" containerName="extract-utilities" Oct 02 12:23:06 crc kubenswrapper[4783]: E1002 12:23:06.095592 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aefd8a3c-6754-4ca9-ad38-ae9afef370c3" containerName="registry-server" Oct 02 12:23:06 crc kubenswrapper[4783]: I1002 12:23:06.095598 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="aefd8a3c-6754-4ca9-ad38-ae9afef370c3" containerName="registry-server" Oct 02 12:23:06 crc kubenswrapper[4783]: E1002 12:23:06.095607 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67e435fd-09ce-4d8d-aca3-4733bae6d63a" containerName="extract-content" Oct 02 12:23:06 crc kubenswrapper[4783]: I1002 12:23:06.095612 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="67e435fd-09ce-4d8d-aca3-4733bae6d63a" containerName="extract-content" Oct 02 12:23:06 crc kubenswrapper[4783]: E1002 12:23:06.095634 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="67e435fd-09ce-4d8d-aca3-4733bae6d63a" containerName="registry-server" Oct 02 12:23:06 crc kubenswrapper[4783]: I1002 12:23:06.095639 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="67e435fd-09ce-4d8d-aca3-4733bae6d63a" containerName="registry-server" Oct 02 12:23:06 crc kubenswrapper[4783]: E1002 12:23:06.095651 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="aefd8a3c-6754-4ca9-ad38-ae9afef370c3" containerName="extract-content" Oct 02 12:23:06 crc kubenswrapper[4783]: I1002 12:23:06.095657 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="aefd8a3c-6754-4ca9-ad38-ae9afef370c3" containerName="extract-content" Oct 02 12:23:06 crc kubenswrapper[4783]: E1002 12:23:06.095673 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="02f9b876-95e8-49ed-89ca-b51c262a7cdc" containerName="container-00" Oct 02 12:23:06 crc kubenswrapper[4783]: I1002 12:23:06.095678 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="02f9b876-95e8-49ed-89ca-b51c262a7cdc" containerName="container-00" Oct 02 12:23:06 crc kubenswrapper[4783]: I1002 12:23:06.095842 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="aefd8a3c-6754-4ca9-ad38-ae9afef370c3" containerName="registry-server" Oct 02 12:23:06 crc kubenswrapper[4783]: I1002 12:23:06.095859 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="02f9b876-95e8-49ed-89ca-b51c262a7cdc" containerName="container-00" Oct 02 12:23:06 crc kubenswrapper[4783]: I1002 12:23:06.095871 4783 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="67e435fd-09ce-4d8d-aca3-4733bae6d63a" containerName="registry-server" Oct 02 12:23:06 crc kubenswrapper[4783]: I1002 12:23:06.096502 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-52dnv/crc-debug-8tvgl" Oct 02 12:23:06 crc kubenswrapper[4783]: I1002 12:23:06.192553 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jsvqk\" (UniqueName: \"kubernetes.io/projected/b78b67fc-3eac-41f2-b051-568252db9569-kube-api-access-jsvqk\") pod \"crc-debug-8tvgl\" (UID: \"b78b67fc-3eac-41f2-b051-568252db9569\") " pod="openshift-must-gather-52dnv/crc-debug-8tvgl" Oct 02 12:23:06 crc kubenswrapper[4783]: I1002 12:23:06.192744 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b78b67fc-3eac-41f2-b051-568252db9569-host\") pod \"crc-debug-8tvgl\" (UID: \"b78b67fc-3eac-41f2-b051-568252db9569\") " pod="openshift-must-gather-52dnv/crc-debug-8tvgl" Oct 02 12:23:06 crc kubenswrapper[4783]: I1002 12:23:06.294314 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b78b67fc-3eac-41f2-b051-568252db9569-host\") pod \"crc-debug-8tvgl\" (UID: \"b78b67fc-3eac-41f2-b051-568252db9569\") " pod="openshift-must-gather-52dnv/crc-debug-8tvgl" Oct 02 12:23:06 crc kubenswrapper[4783]: I1002 12:23:06.294467 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jsvqk\" (UniqueName: \"kubernetes.io/projected/b78b67fc-3eac-41f2-b051-568252db9569-kube-api-access-jsvqk\") pod \"crc-debug-8tvgl\" (UID: \"b78b67fc-3eac-41f2-b051-568252db9569\") " pod="openshift-must-gather-52dnv/crc-debug-8tvgl" Oct 02 12:23:06 crc kubenswrapper[4783]: I1002 12:23:06.294511 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b78b67fc-3eac-41f2-b051-568252db9569-host\") pod \"crc-debug-8tvgl\" (UID: \"b78b67fc-3eac-41f2-b051-568252db9569\") " pod="openshift-must-gather-52dnv/crc-debug-8tvgl" Oct 02 12:23:06 crc kubenswrapper[4783]: I1002 12:23:06.311492 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jsvqk\" (UniqueName: \"kubernetes.io/projected/b78b67fc-3eac-41f2-b051-568252db9569-kube-api-access-jsvqk\") pod \"crc-debug-8tvgl\" (UID: \"b78b67fc-3eac-41f2-b051-568252db9569\") " pod="openshift-must-gather-52dnv/crc-debug-8tvgl" Oct 02 12:23:06 crc kubenswrapper[4783]: I1002 12:23:06.415017 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-52dnv/crc-debug-8tvgl" Oct 02 12:23:06 crc kubenswrapper[4783]: I1002 12:23:06.790709 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-52dnv/crc-debug-8tvgl" event={"ID":"b78b67fc-3eac-41f2-b051-568252db9569","Type":"ContainerStarted","Data":"8a7172613e4dd76549fe12f42f2c65510d13f8f4618bfc218c3ca82806524c81"} Oct 02 12:23:06 crc kubenswrapper[4783]: I1002 12:23:06.791245 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-52dnv/crc-debug-8tvgl" event={"ID":"b78b67fc-3eac-41f2-b051-568252db9569","Type":"ContainerStarted","Data":"8e1bcb7f27f419d7f06132318d41dd0cce2c422da78ba09405923156fd11cb80"} Oct 02 12:23:06 crc kubenswrapper[4783]: I1002 12:23:06.807776 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-52dnv/crc-debug-8tvgl" podStartSLOduration=0.807754824 podStartE2EDuration="807.754824ms" podCreationTimestamp="2025-10-02 12:23:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-10-02 12:23:06.804924317 +0000 UTC m=+5420.121118588" watchObservedRunningTime="2025-10-02 12:23:06.807754824 +0000 UTC m=+5420.123949085" Oct 02 12:23:08 crc kubenswrapper[4783]: I1002 12:23:08.810042 4783 generic.go:334] "Generic (PLEG): container finished" podID="b78b67fc-3eac-41f2-b051-568252db9569" containerID="8a7172613e4dd76549fe12f42f2c65510d13f8f4618bfc218c3ca82806524c81" exitCode=0 Oct 02 12:23:08 crc kubenswrapper[4783]: I1002 12:23:08.810336 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-52dnv/crc-debug-8tvgl" event={"ID":"b78b67fc-3eac-41f2-b051-568252db9569","Type":"ContainerDied","Data":"8a7172613e4dd76549fe12f42f2c65510d13f8f4618bfc218c3ca82806524c81"} Oct 02 12:23:09 crc kubenswrapper[4783]: I1002 12:23:09.916949 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-52dnv/crc-debug-8tvgl" Oct 02 12:23:10 crc kubenswrapper[4783]: I1002 12:23:10.063572 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b78b67fc-3eac-41f2-b051-568252db9569-host\") pod \"b78b67fc-3eac-41f2-b051-568252db9569\" (UID: \"b78b67fc-3eac-41f2-b051-568252db9569\") " Oct 02 12:23:10 crc kubenswrapper[4783]: I1002 12:23:10.063651 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jsvqk\" (UniqueName: \"kubernetes.io/projected/b78b67fc-3eac-41f2-b051-568252db9569-kube-api-access-jsvqk\") pod \"b78b67fc-3eac-41f2-b051-568252db9569\" (UID: \"b78b67fc-3eac-41f2-b051-568252db9569\") " Oct 02 12:23:10 crc kubenswrapper[4783]: I1002 12:23:10.063715 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/b78b67fc-3eac-41f2-b051-568252db9569-host" (OuterVolumeSpecName: "host") pod "b78b67fc-3eac-41f2-b051-568252db9569" (UID: "b78b67fc-3eac-41f2-b051-568252db9569"). InnerVolumeSpecName "host". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 02 12:23:10 crc kubenswrapper[4783]: I1002 12:23:10.064029 4783 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/b78b67fc-3eac-41f2-b051-568252db9569-host\") on node \"crc\" DevicePath \"\"" Oct 02 12:23:10 crc kubenswrapper[4783]: I1002 12:23:10.081335 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b78b67fc-3eac-41f2-b051-568252db9569-kube-api-access-jsvqk" (OuterVolumeSpecName: "kube-api-access-jsvqk") pod "b78b67fc-3eac-41f2-b051-568252db9569" (UID: "b78b67fc-3eac-41f2-b051-568252db9569"). InnerVolumeSpecName "kube-api-access-jsvqk". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 12:23:10 crc kubenswrapper[4783]: I1002 12:23:10.165484 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jsvqk\" (UniqueName: \"kubernetes.io/projected/b78b67fc-3eac-41f2-b051-568252db9569-kube-api-access-jsvqk\") on node \"crc\" DevicePath \"\"" Oct 02 12:23:10 crc kubenswrapper[4783]: I1002 12:23:10.858726 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-52dnv/crc-debug-8tvgl" event={"ID":"b78b67fc-3eac-41f2-b051-568252db9569","Type":"ContainerDied","Data":"8e1bcb7f27f419d7f06132318d41dd0cce2c422da78ba09405923156fd11cb80"} Oct 02 12:23:10 crc kubenswrapper[4783]: I1002 12:23:10.858986 4783 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="8e1bcb7f27f419d7f06132318d41dd0cce2c422da78ba09405923156fd11cb80" Oct 02 12:23:10 crc kubenswrapper[4783]: I1002 12:23:10.859091 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-52dnv/crc-debug-8tvgl" Oct 02 12:23:12 crc kubenswrapper[4783]: I1002 12:23:12.727654 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-88wmk"] Oct 02 12:23:12 crc kubenswrapper[4783]: E1002 12:23:12.728519 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b78b67fc-3eac-41f2-b051-568252db9569" containerName="container-00" Oct 02 12:23:12 crc kubenswrapper[4783]: I1002 12:23:12.728536 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="b78b67fc-3eac-41f2-b051-568252db9569" containerName="container-00" Oct 02 12:23:12 crc kubenswrapper[4783]: I1002 12:23:12.728821 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="b78b67fc-3eac-41f2-b051-568252db9569" containerName="container-00" Oct 02 12:23:12 crc kubenswrapper[4783]: I1002 12:23:12.732381 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-88wmk" Oct 02 12:23:12 crc kubenswrapper[4783]: I1002 12:23:12.756399 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-88wmk"] Oct 02 12:23:12 crc kubenswrapper[4783]: I1002 12:23:12.802470 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hx2px\" (UniqueName: \"kubernetes.io/projected/8cb4cbd2-32f7-461f-8a01-47baf029d1a5-kube-api-access-hx2px\") pod \"redhat-marketplace-88wmk\" (UID: \"8cb4cbd2-32f7-461f-8a01-47baf029d1a5\") " pod="openshift-marketplace/redhat-marketplace-88wmk" Oct 02 12:23:12 crc kubenswrapper[4783]: I1002 12:23:12.802557 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8cb4cbd2-32f7-461f-8a01-47baf029d1a5-utilities\") pod \"redhat-marketplace-88wmk\" (UID: \"8cb4cbd2-32f7-461f-8a01-47baf029d1a5\") " pod="openshift-marketplace/redhat-marketplace-88wmk" Oct 02 12:23:12 crc kubenswrapper[4783]: I1002 12:23:12.802588 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8cb4cbd2-32f7-461f-8a01-47baf029d1a5-catalog-content\") pod \"redhat-marketplace-88wmk\" (UID: \"8cb4cbd2-32f7-461f-8a01-47baf029d1a5\") " pod="openshift-marketplace/redhat-marketplace-88wmk" Oct 02 12:23:12 crc kubenswrapper[4783]: I1002 12:23:12.893294 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-52dnv/crc-debug-8tvgl"] Oct 02 12:23:12 crc kubenswrapper[4783]: I1002 12:23:12.902785 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-52dnv/crc-debug-8tvgl"] Oct 02 12:23:12 crc kubenswrapper[4783]: I1002 12:23:12.904352 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8cb4cbd2-32f7-461f-8a01-47baf029d1a5-utilities\") pod \"redhat-marketplace-88wmk\" (UID: \"8cb4cbd2-32f7-461f-8a01-47baf029d1a5\") " pod="openshift-marketplace/redhat-marketplace-88wmk" Oct 02 12:23:12 crc kubenswrapper[4783]: I1002 12:23:12.904524 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8cb4cbd2-32f7-461f-8a01-47baf029d1a5-catalog-content\") pod \"redhat-marketplace-88wmk\" (UID: \"8cb4cbd2-32f7-461f-8a01-47baf029d1a5\") " pod="openshift-marketplace/redhat-marketplace-88wmk" Oct 02 12:23:12 crc kubenswrapper[4783]: I1002 12:23:12.904741 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hx2px\" (UniqueName: \"kubernetes.io/projected/8cb4cbd2-32f7-461f-8a01-47baf029d1a5-kube-api-access-hx2px\") pod \"redhat-marketplace-88wmk\" (UID: \"8cb4cbd2-32f7-461f-8a01-47baf029d1a5\") " pod="openshift-marketplace/redhat-marketplace-88wmk" Oct 02 12:23:12 crc kubenswrapper[4783]: I1002 12:23:12.904956 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8cb4cbd2-32f7-461f-8a01-47baf029d1a5-utilities\") pod \"redhat-marketplace-88wmk\" (UID: \"8cb4cbd2-32f7-461f-8a01-47baf029d1a5\") " pod="openshift-marketplace/redhat-marketplace-88wmk" Oct 02 12:23:12 crc kubenswrapper[4783]: I1002 12:23:12.905017 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" 
(UniqueName: \"kubernetes.io/empty-dir/8cb4cbd2-32f7-461f-8a01-47baf029d1a5-catalog-content\") pod \"redhat-marketplace-88wmk\" (UID: \"8cb4cbd2-32f7-461f-8a01-47baf029d1a5\") " pod="openshift-marketplace/redhat-marketplace-88wmk" Oct 02 12:23:12 crc kubenswrapper[4783]: I1002 12:23:12.944648 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hx2px\" (UniqueName: \"kubernetes.io/projected/8cb4cbd2-32f7-461f-8a01-47baf029d1a5-kube-api-access-hx2px\") pod \"redhat-marketplace-88wmk\" (UID: \"8cb4cbd2-32f7-461f-8a01-47baf029d1a5\") " pod="openshift-marketplace/redhat-marketplace-88wmk" Oct 02 12:23:13 crc kubenswrapper[4783]: I1002 12:23:13.075060 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-88wmk" Oct 02 12:23:13 crc kubenswrapper[4783]: I1002 12:23:13.563919 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b78b67fc-3eac-41f2-b051-568252db9569" path="/var/lib/kubelet/pods/b78b67fc-3eac-41f2-b051-568252db9569/volumes" Oct 02 12:23:13 crc kubenswrapper[4783]: I1002 12:23:13.564821 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-88wmk"] Oct 02 12:23:13 crc kubenswrapper[4783]: I1002 12:23:13.891616 4783 generic.go:334] "Generic (PLEG): container finished" podID="8cb4cbd2-32f7-461f-8a01-47baf029d1a5" containerID="10df9641ddad12c37c04b7ecffaed9be3f4956b7ea69838b6bd453f70730be1d" exitCode=0 Oct 02 12:23:13 crc kubenswrapper[4783]: I1002 12:23:13.891921 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-88wmk" event={"ID":"8cb4cbd2-32f7-461f-8a01-47baf029d1a5","Type":"ContainerDied","Data":"10df9641ddad12c37c04b7ecffaed9be3f4956b7ea69838b6bd453f70730be1d"} Oct 02 12:23:13 crc kubenswrapper[4783]: I1002 12:23:13.891955 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-88wmk" event={"ID":"8cb4cbd2-32f7-461f-8a01-47baf029d1a5","Type":"ContainerStarted","Data":"6b248e3bd0921b9f6c57eb6f39cfe75c7233b65c562c5365459f8889111ae259"} Oct 02 12:23:14 crc kubenswrapper[4783]: I1002 12:23:14.109363 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-52dnv/crc-debug-pfzm5"] Oct 02 12:23:14 crc kubenswrapper[4783]: I1002 12:23:14.110584 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-52dnv/crc-debug-pfzm5" Oct 02 12:23:14 crc kubenswrapper[4783]: I1002 12:23:14.131890 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6q6lw\" (UniqueName: \"kubernetes.io/projected/db15bda2-72c3-4ec5-ae17-92ff291af121-kube-api-access-6q6lw\") pod \"crc-debug-pfzm5\" (UID: \"db15bda2-72c3-4ec5-ae17-92ff291af121\") " pod="openshift-must-gather-52dnv/crc-debug-pfzm5" Oct 02 12:23:14 crc kubenswrapper[4783]: I1002 12:23:14.132245 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/db15bda2-72c3-4ec5-ae17-92ff291af121-host\") pod \"crc-debug-pfzm5\" (UID: \"db15bda2-72c3-4ec5-ae17-92ff291af121\") " pod="openshift-must-gather-52dnv/crc-debug-pfzm5" Oct 02 12:23:14 crc kubenswrapper[4783]: I1002 12:23:14.233973 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6q6lw\" (UniqueName: \"kubernetes.io/projected/db15bda2-72c3-4ec5-ae17-92ff291af121-kube-api-access-6q6lw\") pod \"crc-debug-pfzm5\" (UID: \"db15bda2-72c3-4ec5-ae17-92ff291af121\") " pod="openshift-must-gather-52dnv/crc-debug-pfzm5" Oct 02 12:23:14 crc kubenswrapper[4783]: I1002 12:23:14.234056 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/db15bda2-72c3-4ec5-ae17-92ff291af121-host\") pod \"crc-debug-pfzm5\" (UID: \"db15bda2-72c3-4ec5-ae17-92ff291af121\") " pod="openshift-must-gather-52dnv/crc-debug-pfzm5" Oct 02 12:23:14 crc kubenswrapper[4783]: I1002 12:23:14.234471 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/db15bda2-72c3-4ec5-ae17-92ff291af121-host\") pod \"crc-debug-pfzm5\" (UID: \"db15bda2-72c3-4ec5-ae17-92ff291af121\") " pod="openshift-must-gather-52dnv/crc-debug-pfzm5" Oct 02 12:23:14 crc kubenswrapper[4783]: I1002 12:23:14.263065 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6q6lw\" (UniqueName: \"kubernetes.io/projected/db15bda2-72c3-4ec5-ae17-92ff291af121-kube-api-access-6q6lw\") pod \"crc-debug-pfzm5\" (UID: \"db15bda2-72c3-4ec5-ae17-92ff291af121\") " pod="openshift-must-gather-52dnv/crc-debug-pfzm5" Oct 02 12:23:14 crc kubenswrapper[4783]: I1002 12:23:14.432542 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-52dnv/crc-debug-pfzm5" Oct 02 12:23:14 crc kubenswrapper[4783]: I1002 12:23:14.900910 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-52dnv/crc-debug-pfzm5" event={"ID":"db15bda2-72c3-4ec5-ae17-92ff291af121","Type":"ContainerStarted","Data":"ce84325ac5fd79ffc0496050ef9d79c98c05290c6c97492fcbe61379c71de97b"} Oct 02 12:23:15 crc kubenswrapper[4783]: I1002 12:23:15.914610 4783 generic.go:334] "Generic (PLEG): container finished" podID="8cb4cbd2-32f7-461f-8a01-47baf029d1a5" containerID="0b7bc7dff29b8051921d016490b4faf124a9769099b4cf0c34b90c7a0155ef54" exitCode=0 Oct 02 12:23:15 crc kubenswrapper[4783]: I1002 12:23:15.914715 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-88wmk" event={"ID":"8cb4cbd2-32f7-461f-8a01-47baf029d1a5","Type":"ContainerDied","Data":"0b7bc7dff29b8051921d016490b4faf124a9769099b4cf0c34b90c7a0155ef54"} Oct 02 12:23:15 crc kubenswrapper[4783]: I1002 12:23:15.921760 4783 generic.go:334] "Generic (PLEG): container finished" podID="db15bda2-72c3-4ec5-ae17-92ff291af121" containerID="08d491ea11f4df375813b6f180fbb39f8601c31be14c27c1dae1d8cf8f50e264" exitCode=0 Oct 02 12:23:15 crc kubenswrapper[4783]: I1002 12:23:15.921810 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-52dnv/crc-debug-pfzm5" event={"ID":"db15bda2-72c3-4ec5-ae17-92ff291af121","Type":"ContainerDied","Data":"08d491ea11f4df375813b6f180fbb39f8601c31be14c27c1dae1d8cf8f50e264"} Oct 02 12:23:15 crc kubenswrapper[4783]: I1002 12:23:15.979322 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-52dnv/crc-debug-pfzm5"] Oct 02 12:23:15 crc kubenswrapper[4783]: I1002 12:23:15.989445 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-52dnv/crc-debug-pfzm5"] Oct 02 12:23:16 crc kubenswrapper[4783]: I1002 12:23:16.931463 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-88wmk" event={"ID":"8cb4cbd2-32f7-461f-8a01-47baf029d1a5","Type":"ContainerStarted","Data":"16cfa0d777b0608d813a8c617f9ede9d51ebab01b52e70075e284d78012bd2bd"} Oct 02 12:23:16 crc kubenswrapper[4783]: I1002 12:23:16.980538 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-88wmk" podStartSLOduration=2.4724614 podStartE2EDuration="4.980505581s" podCreationTimestamp="2025-10-02 12:23:12 +0000 UTC" firstStartedPulling="2025-10-02 12:23:13.893877092 +0000 UTC m=+5427.210071353" lastFinishedPulling="2025-10-02 12:23:16.401921283 +0000 UTC m=+5429.718115534" observedRunningTime="2025-10-02 12:23:16.973665425 +0000 UTC m=+5430.289859686" watchObservedRunningTime="2025-10-02 12:23:16.980505581 +0000 UTC m=+5430.296699842" Oct 02 12:23:17 crc kubenswrapper[4783]: I1002 12:23:17.027899 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-52dnv/crc-debug-pfzm5" Oct 02 12:23:17 crc kubenswrapper[4783]: I1002 12:23:17.087990 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6q6lw\" (UniqueName: \"kubernetes.io/projected/db15bda2-72c3-4ec5-ae17-92ff291af121-kube-api-access-6q6lw\") pod \"db15bda2-72c3-4ec5-ae17-92ff291af121\" (UID: \"db15bda2-72c3-4ec5-ae17-92ff291af121\") " Oct 02 12:23:17 crc kubenswrapper[4783]: I1002 12:23:17.088085 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/db15bda2-72c3-4ec5-ae17-92ff291af121-host\") pod \"db15bda2-72c3-4ec5-ae17-92ff291af121\" (UID: \"db15bda2-72c3-4ec5-ae17-92ff291af121\") " Oct 02 12:23:17 crc kubenswrapper[4783]: I1002 12:23:17.088197 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/db15bda2-72c3-4ec5-ae17-92ff291af121-host" (OuterVolumeSpecName: "host") pod "db15bda2-72c3-4ec5-ae17-92ff291af121" (UID: "db15bda2-72c3-4ec5-ae17-92ff291af121"). InnerVolumeSpecName "host". PluginName "kubernetes.io/host-path", VolumeGidValue "" Oct 02 12:23:17 crc kubenswrapper[4783]: I1002 12:23:17.088603 4783 reconciler_common.go:293] "Volume detached for volume \"host\" (UniqueName: \"kubernetes.io/host-path/db15bda2-72c3-4ec5-ae17-92ff291af121-host\") on node \"crc\" DevicePath \"\"" Oct 02 12:23:17 crc kubenswrapper[4783]: I1002 12:23:17.104676 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/db15bda2-72c3-4ec5-ae17-92ff291af121-kube-api-access-6q6lw" (OuterVolumeSpecName: "kube-api-access-6q6lw") pod "db15bda2-72c3-4ec5-ae17-92ff291af121" (UID: "db15bda2-72c3-4ec5-ae17-92ff291af121"). InnerVolumeSpecName "kube-api-access-6q6lw". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 12:23:17 crc kubenswrapper[4783]: I1002 12:23:17.190432 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6q6lw\" (UniqueName: \"kubernetes.io/projected/db15bda2-72c3-4ec5-ae17-92ff291af121-kube-api-access-6q6lw\") on node \"crc\" DevicePath \"\"" Oct 02 12:23:17 crc kubenswrapper[4783]: I1002 12:23:17.557081 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="db15bda2-72c3-4ec5-ae17-92ff291af121" path="/var/lib/kubelet/pods/db15bda2-72c3-4ec5-ae17-92ff291af121/volumes" Oct 02 12:23:17 crc kubenswrapper[4783]: I1002 12:23:17.947543 4783 scope.go:117] "RemoveContainer" containerID="08d491ea11f4df375813b6f180fbb39f8601c31be14c27c1dae1d8cf8f50e264" Oct 02 12:23:17 crc kubenswrapper[4783]: I1002 12:23:17.947584 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-52dnv/crc-debug-pfzm5" Oct 02 12:23:18 crc kubenswrapper[4783]: I1002 12:23:18.542709 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_157dbb73256b1b6d15c92cb3b9832917051f27d0aa325f8cd46370e26dn66t9_04a57d36-edbe-46bc-b8b8-d884500f159a/util/0.log" Oct 02 12:23:18 crc kubenswrapper[4783]: I1002 12:23:18.771378 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_157dbb73256b1b6d15c92cb3b9832917051f27d0aa325f8cd46370e26dn66t9_04a57d36-edbe-46bc-b8b8-d884500f159a/pull/0.log" Oct 02 12:23:18 crc kubenswrapper[4783]: I1002 12:23:18.814105 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_157dbb73256b1b6d15c92cb3b9832917051f27d0aa325f8cd46370e26dn66t9_04a57d36-edbe-46bc-b8b8-d884500f159a/util/0.log" Oct 02 12:23:18 crc kubenswrapper[4783]: I1002 12:23:18.819128 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_157dbb73256b1b6d15c92cb3b9832917051f27d0aa325f8cd46370e26dn66t9_04a57d36-edbe-46bc-b8b8-d884500f159a/pull/0.log" Oct 02 12:23:19 crc kubenswrapper[4783]: I1002 12:23:19.046536 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_157dbb73256b1b6d15c92cb3b9832917051f27d0aa325f8cd46370e26dn66t9_04a57d36-edbe-46bc-b8b8-d884500f159a/pull/0.log" Oct 02 12:23:19 crc kubenswrapper[4783]: I1002 12:23:19.138183 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_157dbb73256b1b6d15c92cb3b9832917051f27d0aa325f8cd46370e26dn66t9_04a57d36-edbe-46bc-b8b8-d884500f159a/util/0.log" Oct 02 12:23:19 crc kubenswrapper[4783]: I1002 12:23:19.143305 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_157dbb73256b1b6d15c92cb3b9832917051f27d0aa325f8cd46370e26dn66t9_04a57d36-edbe-46bc-b8b8-d884500f159a/extract/0.log" Oct 02 12:23:19 crc kubenswrapper[4783]: I1002 12:23:19.275360 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-6ff8b75857-x7698_87d34dfd-25c7-4f4c-bbda-058e38a01994/kube-rbac-proxy/0.log" Oct 02 12:23:19 crc kubenswrapper[4783]: I1002 12:23:19.433456 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-6ff8b75857-x7698_87d34dfd-25c7-4f4c-bbda-058e38a01994/manager/0.log" Oct 02 12:23:19 crc kubenswrapper[4783]: I1002 12:23:19.496870 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-644bddb6d8-fkhwr_ac5ac6e0-2521-4daa-8c0e-091f13b7a406/kube-rbac-proxy/0.log" Oct 02 12:23:19 crc kubenswrapper[4783]: I1002 12:23:19.573197 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-644bddb6d8-fkhwr_ac5ac6e0-2521-4daa-8c0e-091f13b7a406/manager/0.log" Oct 02 12:23:19 crc kubenswrapper[4783]: I1002 12:23:19.652755 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-84f4f7b77b-wt52p_52d93c12-b942-4ad3-935a-b555026711ea/kube-rbac-proxy/0.log" Oct 02 12:23:19 crc kubenswrapper[4783]: I1002 12:23:19.722392 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-84f4f7b77b-wt52p_52d93c12-b942-4ad3-935a-b555026711ea/manager/0.log" Oct 02 12:23:20 crc kubenswrapper[4783]: I1002 12:23:20.208293 4783 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openstack-operators_glance-operator-controller-manager-84958c4d49-92cr8_8041f454-2294-478d-b4cd-ffa769b8f709/kube-rbac-proxy/0.log" Oct 02 12:23:20 crc kubenswrapper[4783]: I1002 12:23:20.275386 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-84958c4d49-92cr8_8041f454-2294-478d-b4cd-ffa769b8f709/manager/0.log" Oct 02 12:23:20 crc kubenswrapper[4783]: I1002 12:23:20.372052 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5d889d78cf-pqhp6_24d5bc39-4bbb-47af-94e2-222118ccdabb/kube-rbac-proxy/0.log" Oct 02 12:23:20 crc kubenswrapper[4783]: I1002 12:23:20.516695 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-5d889d78cf-pqhp6_24d5bc39-4bbb-47af-94e2-222118ccdabb/manager/0.log" Oct 02 12:23:20 crc kubenswrapper[4783]: I1002 12:23:20.658748 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-9f4696d94-9pqks_ae698aff-8888-48b6-9c37-cbcea6e7bc6e/kube-rbac-proxy/0.log" Oct 02 12:23:20 crc kubenswrapper[4783]: I1002 12:23:20.767530 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-9f4696d94-9pqks_ae698aff-8888-48b6-9c37-cbcea6e7bc6e/manager/0.log" Oct 02 12:23:20 crc kubenswrapper[4783]: I1002 12:23:20.952223 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-9d6c5db85-6bk6b_018c6179-6e65-461a-b457-a5eb949672de/kube-rbac-proxy/0.log" Oct 02 12:23:21 crc kubenswrapper[4783]: I1002 12:23:21.114483 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-5cd4858477-hs7gk_c788b295-31bf-496b-8b4f-fccc3ff3be17/kube-rbac-proxy/0.log" Oct 02 12:23:21 crc kubenswrapper[4783]: I1002 12:23:21.188742 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-9d6c5db85-6bk6b_018c6179-6e65-461a-b457-a5eb949672de/manager/0.log" Oct 02 12:23:21 crc kubenswrapper[4783]: I1002 12:23:21.307028 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-5cd4858477-hs7gk_c788b295-31bf-496b-8b4f-fccc3ff3be17/manager/0.log" Oct 02 12:23:21 crc kubenswrapper[4783]: I1002 12:23:21.469343 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-5bd55b4bff-qtqcj_6cf94630-4019-4863-80be-6e1088cf3407/kube-rbac-proxy/0.log" Oct 02 12:23:21 crc kubenswrapper[4783]: I1002 12:23:21.569035 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-5bd55b4bff-qtqcj_6cf94630-4019-4863-80be-6e1088cf3407/manager/0.log" Oct 02 12:23:21 crc kubenswrapper[4783]: I1002 12:23:21.721512 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-6d68dbc695-qg9vc_1f8c1065-586c-4dce-a4ce-fc262d00063d/manager/0.log" Oct 02 12:23:21 crc kubenswrapper[4783]: I1002 12:23:21.800035 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-6d68dbc695-qg9vc_1f8c1065-586c-4dce-a4ce-fc262d00063d/kube-rbac-proxy/0.log" Oct 02 12:23:21 crc kubenswrapper[4783]: I1002 12:23:21.993514 4783 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-88c7-w5766_56a432ad-7000-45e3-ac88-9ebd3a1eb3a7/manager/0.log" Oct 02 12:23:21 crc kubenswrapper[4783]: I1002 12:23:21.995719 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-88c7-w5766_56a432ad-7000-45e3-ac88-9ebd3a1eb3a7/kube-rbac-proxy/0.log" Oct 02 12:23:22 crc kubenswrapper[4783]: I1002 12:23:22.194209 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-849d5b9b84-6xwmm_a57ed846-184b-49fd-af9f-efc1fbba98e5/kube-rbac-proxy/0.log" Oct 02 12:23:22 crc kubenswrapper[4783]: I1002 12:23:22.300982 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-849d5b9b84-6xwmm_a57ed846-184b-49fd-af9f-efc1fbba98e5/manager/0.log" Oct 02 12:23:22 crc kubenswrapper[4783]: I1002 12:23:22.318757 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-64cd67b5cb-kfxk9_82a47dd2-82ef-4fb9-9216-d14a2332683f/kube-rbac-proxy/0.log" Oct 02 12:23:22 crc kubenswrapper[4783]: I1002 12:23:22.469018 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-64cd67b5cb-kfxk9_82a47dd2-82ef-4fb9-9216-d14a2332683f/manager/0.log" Oct 02 12:23:22 crc kubenswrapper[4783]: I1002 12:23:22.616926 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-7b787867f4-k7m89_139b1abc-ddcf-4ccc-83f4-deb58a682b0c/kube-rbac-proxy/0.log" Oct 02 12:23:22 crc kubenswrapper[4783]: I1002 12:23:22.667205 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-7b787867f4-k7m89_139b1abc-ddcf-4ccc-83f4-deb58a682b0c/manager/0.log" Oct 02 12:23:22 crc kubenswrapper[4783]: I1002 12:23:22.680497 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-5869cb545-4pwjh_ebfbf228-ed71-4331-96be-8105d5029d2c/kube-rbac-proxy/0.log" Oct 02 12:23:22 crc kubenswrapper[4783]: I1002 12:23:22.760399 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-5869cb545-4pwjh_ebfbf228-ed71-4331-96be-8105d5029d2c/manager/0.log" Oct 02 12:23:22 crc kubenswrapper[4783]: I1002 12:23:22.898451 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-5f7d749dc7-gzmfg_002e4392-6fb3-4354-86d1-8c6f1727e05c/kube-rbac-proxy/0.log" Oct 02 12:23:23 crc kubenswrapper[4783]: I1002 12:23:23.051075 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-cc764bd77-wk2px_0d2a789d-053a-480b-a29e-3c0afef53319/kube-rbac-proxy/0.log" Oct 02 12:23:23 crc kubenswrapper[4783]: I1002 12:23:23.078945 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-88wmk" Oct 02 12:23:23 crc kubenswrapper[4783]: I1002 12:23:23.078988 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-88wmk" Oct 02 12:23:23 crc kubenswrapper[4783]: I1002 12:23:23.165973 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-marketplace/redhat-marketplace-88wmk" Oct 02 12:23:23 crc kubenswrapper[4783]: I1002 12:23:23.350260 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-98sw6_999b4d1a-2ac1-42e6-8799-d52d3f34341a/registry-server/0.log" Oct 02 12:23:23 crc kubenswrapper[4783]: I1002 12:23:23.506408 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-operator-cc764bd77-wk2px_0d2a789d-053a-480b-a29e-3c0afef53319/operator/0.log" Oct 02 12:23:23 crc kubenswrapper[4783]: I1002 12:23:23.903828 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-9976ff44c-xbh4h_81c143d8-5f4b-4baf-9cb1-6f34110f4833/kube-rbac-proxy/0.log" Oct 02 12:23:23 crc kubenswrapper[4783]: I1002 12:23:23.993485 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-9976ff44c-xbh4h_81c143d8-5f4b-4baf-9cb1-6f34110f4833/manager/0.log" Oct 02 12:23:24 crc kubenswrapper[4783]: I1002 12:23:24.037958 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-589c58c6c-qkgtm_7d7c0a51-805f-422d-bb62-75a53f9a80d0/kube-rbac-proxy/0.log" Oct 02 12:23:24 crc kubenswrapper[4783]: I1002 12:23:24.063101 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-88wmk" Oct 02 12:23:24 crc kubenswrapper[4783]: I1002 12:23:24.116879 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-88wmk"] Oct 02 12:23:24 crc kubenswrapper[4783]: I1002 12:23:24.246783 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-589c58c6c-qkgtm_7d7c0a51-805f-422d-bb62-75a53f9a80d0/manager/0.log" Oct 02 12:23:24 crc kubenswrapper[4783]: I1002 12:23:24.392789 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-5f97d8c699-xgkcf_90aa3047-331d-471b-b009-9be03d87b3ed/operator/0.log" Oct 02 12:23:24 crc kubenswrapper[4783]: I1002 12:23:24.407536 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-5f7d749dc7-gzmfg_002e4392-6fb3-4354-86d1-8c6f1727e05c/manager/0.log" Oct 02 12:23:24 crc kubenswrapper[4783]: I1002 12:23:24.519755 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-84d6b4b759-9fvns_96ca8cc4-c237-43b0-ae6b-7cc86a183f46/manager/0.log" Oct 02 12:23:24 crc kubenswrapper[4783]: I1002 12:23:24.565143 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-84d6b4b759-9fvns_96ca8cc4-c237-43b0-ae6b-7cc86a183f46/kube-rbac-proxy/0.log" Oct 02 12:23:24 crc kubenswrapper[4783]: I1002 12:23:24.594225 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-b8d54b5d7-b5rx5_650ecd19-902d-48f8-bea4-2c7f120885dc/kube-rbac-proxy/0.log" Oct 02 12:23:24 crc kubenswrapper[4783]: I1002 12:23:24.720317 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-b8d54b5d7-b5rx5_650ecd19-902d-48f8-bea4-2c7f120885dc/manager/0.log" Oct 02 12:23:24 crc kubenswrapper[4783]: I1002 12:23:24.864235 4783 log.go:25] "Finished parsing log 
file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-85777745bb-5b98j_0a24bee6-3dd0-47b3-881a-b5cff49d1e1e/manager/0.log" Oct 02 12:23:24 crc kubenswrapper[4783]: I1002 12:23:24.904731 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-85777745bb-5b98j_0a24bee6-3dd0-47b3-881a-b5cff49d1e1e/kube-rbac-proxy/0.log" Oct 02 12:23:24 crc kubenswrapper[4783]: I1002 12:23:24.963663 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-6b9957f54f-pwg7t_d7665104-7dc0-4450-9d2a-85d514354c9e/kube-rbac-proxy/0.log" Oct 02 12:23:25 crc kubenswrapper[4783]: I1002 12:23:25.089619 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-6b9957f54f-pwg7t_d7665104-7dc0-4450-9d2a-85d514354c9e/manager/0.log" Oct 02 12:23:26 crc kubenswrapper[4783]: I1002 12:23:26.022171 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-88wmk" podUID="8cb4cbd2-32f7-461f-8a01-47baf029d1a5" containerName="registry-server" containerID="cri-o://16cfa0d777b0608d813a8c617f9ede9d51ebab01b52e70075e284d78012bd2bd" gracePeriod=2 Oct 02 12:23:26 crc kubenswrapper[4783]: I1002 12:23:26.595204 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-88wmk" Oct 02 12:23:26 crc kubenswrapper[4783]: I1002 12:23:26.789109 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8cb4cbd2-32f7-461f-8a01-47baf029d1a5-catalog-content\") pod \"8cb4cbd2-32f7-461f-8a01-47baf029d1a5\" (UID: \"8cb4cbd2-32f7-461f-8a01-47baf029d1a5\") " Oct 02 12:23:26 crc kubenswrapper[4783]: I1002 12:23:26.789186 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8cb4cbd2-32f7-461f-8a01-47baf029d1a5-utilities\") pod \"8cb4cbd2-32f7-461f-8a01-47baf029d1a5\" (UID: \"8cb4cbd2-32f7-461f-8a01-47baf029d1a5\") " Oct 02 12:23:26 crc kubenswrapper[4783]: I1002 12:23:26.789254 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hx2px\" (UniqueName: \"kubernetes.io/projected/8cb4cbd2-32f7-461f-8a01-47baf029d1a5-kube-api-access-hx2px\") pod \"8cb4cbd2-32f7-461f-8a01-47baf029d1a5\" (UID: \"8cb4cbd2-32f7-461f-8a01-47baf029d1a5\") " Oct 02 12:23:26 crc kubenswrapper[4783]: I1002 12:23:26.790489 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8cb4cbd2-32f7-461f-8a01-47baf029d1a5-utilities" (OuterVolumeSpecName: "utilities") pod "8cb4cbd2-32f7-461f-8a01-47baf029d1a5" (UID: "8cb4cbd2-32f7-461f-8a01-47baf029d1a5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 12:23:26 crc kubenswrapper[4783]: I1002 12:23:26.795266 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cb4cbd2-32f7-461f-8a01-47baf029d1a5-kube-api-access-hx2px" (OuterVolumeSpecName: "kube-api-access-hx2px") pod "8cb4cbd2-32f7-461f-8a01-47baf029d1a5" (UID: "8cb4cbd2-32f7-461f-8a01-47baf029d1a5"). InnerVolumeSpecName "kube-api-access-hx2px". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 12:23:26 crc kubenswrapper[4783]: I1002 12:23:26.801631 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8cb4cbd2-32f7-461f-8a01-47baf029d1a5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8cb4cbd2-32f7-461f-8a01-47baf029d1a5" (UID: "8cb4cbd2-32f7-461f-8a01-47baf029d1a5"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 12:23:26 crc kubenswrapper[4783]: I1002 12:23:26.893349 4783 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8cb4cbd2-32f7-461f-8a01-47baf029d1a5-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 02 12:23:26 crc kubenswrapper[4783]: I1002 12:23:26.893738 4783 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8cb4cbd2-32f7-461f-8a01-47baf029d1a5-utilities\") on node \"crc\" DevicePath \"\"" Oct 02 12:23:26 crc kubenswrapper[4783]: I1002 12:23:26.893754 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hx2px\" (UniqueName: \"kubernetes.io/projected/8cb4cbd2-32f7-461f-8a01-47baf029d1a5-kube-api-access-hx2px\") on node \"crc\" DevicePath \"\"" Oct 02 12:23:27 crc kubenswrapper[4783]: I1002 12:23:27.031181 4783 generic.go:334] "Generic (PLEG): container finished" podID="8cb4cbd2-32f7-461f-8a01-47baf029d1a5" containerID="16cfa0d777b0608d813a8c617f9ede9d51ebab01b52e70075e284d78012bd2bd" exitCode=0 Oct 02 12:23:27 crc kubenswrapper[4783]: I1002 12:23:27.031241 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-88wmk" event={"ID":"8cb4cbd2-32f7-461f-8a01-47baf029d1a5","Type":"ContainerDied","Data":"16cfa0d777b0608d813a8c617f9ede9d51ebab01b52e70075e284d78012bd2bd"} Oct 02 12:23:27 crc kubenswrapper[4783]: I1002 12:23:27.031277 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-88wmk" event={"ID":"8cb4cbd2-32f7-461f-8a01-47baf029d1a5","Type":"ContainerDied","Data":"6b248e3bd0921b9f6c57eb6f39cfe75c7233b65c562c5365459f8889111ae259"} Oct 02 12:23:27 crc kubenswrapper[4783]: I1002 12:23:27.031301 4783 scope.go:117] "RemoveContainer" containerID="16cfa0d777b0608d813a8c617f9ede9d51ebab01b52e70075e284d78012bd2bd" Oct 02 12:23:27 crc kubenswrapper[4783]: I1002 12:23:27.033278 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-88wmk" Oct 02 12:23:27 crc kubenswrapper[4783]: I1002 12:23:27.057823 4783 scope.go:117] "RemoveContainer" containerID="0b7bc7dff29b8051921d016490b4faf124a9769099b4cf0c34b90c7a0155ef54" Oct 02 12:23:27 crc kubenswrapper[4783]: I1002 12:23:27.086067 4783 scope.go:117] "RemoveContainer" containerID="10df9641ddad12c37c04b7ecffaed9be3f4956b7ea69838b6bd453f70730be1d" Oct 02 12:23:27 crc kubenswrapper[4783]: I1002 12:23:27.096929 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-88wmk"] Oct 02 12:23:27 crc kubenswrapper[4783]: I1002 12:23:27.114099 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-88wmk"] Oct 02 12:23:27 crc kubenswrapper[4783]: I1002 12:23:27.137703 4783 scope.go:117] "RemoveContainer" containerID="16cfa0d777b0608d813a8c617f9ede9d51ebab01b52e70075e284d78012bd2bd" Oct 02 12:23:27 crc kubenswrapper[4783]: E1002 12:23:27.141848 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"16cfa0d777b0608d813a8c617f9ede9d51ebab01b52e70075e284d78012bd2bd\": container with ID starting with 16cfa0d777b0608d813a8c617f9ede9d51ebab01b52e70075e284d78012bd2bd not found: ID does not exist" containerID="16cfa0d777b0608d813a8c617f9ede9d51ebab01b52e70075e284d78012bd2bd" Oct 02 12:23:27 crc kubenswrapper[4783]: I1002 12:23:27.141897 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"16cfa0d777b0608d813a8c617f9ede9d51ebab01b52e70075e284d78012bd2bd"} err="failed to get container status \"16cfa0d777b0608d813a8c617f9ede9d51ebab01b52e70075e284d78012bd2bd\": rpc error: code = NotFound desc = could not find container \"16cfa0d777b0608d813a8c617f9ede9d51ebab01b52e70075e284d78012bd2bd\": container with ID starting with 16cfa0d777b0608d813a8c617f9ede9d51ebab01b52e70075e284d78012bd2bd not found: ID does not exist" Oct 02 12:23:27 crc kubenswrapper[4783]: I1002 12:23:27.141931 4783 scope.go:117] "RemoveContainer" containerID="0b7bc7dff29b8051921d016490b4faf124a9769099b4cf0c34b90c7a0155ef54" Oct 02 12:23:27 crc kubenswrapper[4783]: E1002 12:23:27.143343 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0b7bc7dff29b8051921d016490b4faf124a9769099b4cf0c34b90c7a0155ef54\": container with ID starting with 0b7bc7dff29b8051921d016490b4faf124a9769099b4cf0c34b90c7a0155ef54 not found: ID does not exist" containerID="0b7bc7dff29b8051921d016490b4faf124a9769099b4cf0c34b90c7a0155ef54" Oct 02 12:23:27 crc kubenswrapper[4783]: I1002 12:23:27.143390 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0b7bc7dff29b8051921d016490b4faf124a9769099b4cf0c34b90c7a0155ef54"} err="failed to get container status \"0b7bc7dff29b8051921d016490b4faf124a9769099b4cf0c34b90c7a0155ef54\": rpc error: code = NotFound desc = could not find container \"0b7bc7dff29b8051921d016490b4faf124a9769099b4cf0c34b90c7a0155ef54\": container with ID starting with 0b7bc7dff29b8051921d016490b4faf124a9769099b4cf0c34b90c7a0155ef54 not found: ID does not exist" Oct 02 12:23:27 crc kubenswrapper[4783]: I1002 12:23:27.143464 4783 scope.go:117] "RemoveContainer" containerID="10df9641ddad12c37c04b7ecffaed9be3f4956b7ea69838b6bd453f70730be1d" Oct 02 12:23:27 crc kubenswrapper[4783]: E1002 12:23:27.143826 4783 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"10df9641ddad12c37c04b7ecffaed9be3f4956b7ea69838b6bd453f70730be1d\": container with ID starting with 10df9641ddad12c37c04b7ecffaed9be3f4956b7ea69838b6bd453f70730be1d not found: ID does not exist" containerID="10df9641ddad12c37c04b7ecffaed9be3f4956b7ea69838b6bd453f70730be1d" Oct 02 12:23:27 crc kubenswrapper[4783]: I1002 12:23:27.143863 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"10df9641ddad12c37c04b7ecffaed9be3f4956b7ea69838b6bd453f70730be1d"} err="failed to get container status \"10df9641ddad12c37c04b7ecffaed9be3f4956b7ea69838b6bd453f70730be1d\": rpc error: code = NotFound desc = could not find container \"10df9641ddad12c37c04b7ecffaed9be3f4956b7ea69838b6bd453f70730be1d\": container with ID starting with 10df9641ddad12c37c04b7ecffaed9be3f4956b7ea69838b6bd453f70730be1d not found: ID does not exist" Oct 02 12:23:27 crc kubenswrapper[4783]: I1002 12:23:27.583598 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cb4cbd2-32f7-461f-8a01-47baf029d1a5" path="/var/lib/kubelet/pods/8cb4cbd2-32f7-461f-8a01-47baf029d1a5/volumes" Oct 02 12:23:41 crc kubenswrapper[4783]: I1002 12:23:41.470617 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-8mjk7_a6d0e458-bd0f-402c-8cad-db6eba1e45ff/control-plane-machine-set-operator/0.log" Oct 02 12:23:41 crc kubenswrapper[4783]: I1002 12:23:41.622833 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-xlxdx_50389666-bf4f-4442-a4cd-f3609994ce1b/kube-rbac-proxy/0.log" Oct 02 12:23:41 crc kubenswrapper[4783]: I1002 12:23:41.696550 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-xlxdx_50389666-bf4f-4442-a4cd-f3609994ce1b/machine-api-operator/0.log" Oct 02 12:23:53 crc kubenswrapper[4783]: I1002 12:23:53.112627 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-5b446d88c5-gkpsw_faf53b2d-1a5b-4753-9067-2c0e6451c204/cert-manager-controller/0.log" Oct 02 12:23:53 crc kubenswrapper[4783]: I1002 12:23:53.284023 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-7f985d654d-n7shk_5f865515-bc01-488a-9f04-6e69b072a30e/cert-manager-cainjector/0.log" Oct 02 12:23:53 crc kubenswrapper[4783]: I1002 12:23:53.385951 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-5655c58dd6-stgp6_25ca49b1-8994-44b5-b27d-671cd01d74da/cert-manager-webhook/0.log" Oct 02 12:24:05 crc kubenswrapper[4783]: I1002 12:24:05.311727 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-6b874cbd85-5b9kv_d206e83f-c7d7-4a26-961b-7649d60646a0/nmstate-console-plugin/0.log" Oct 02 12:24:05 crc kubenswrapper[4783]: I1002 12:24:05.493526 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-9kdkg_d95fff49-fc1c-4c9c-af21-f6058ede50d5/nmstate-handler/0.log" Oct 02 12:24:05 crc kubenswrapper[4783]: I1002 12:24:05.570033 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-fdff9cb8d-m8ljg_dd90f287-398b-473a-b421-57a85abdca9a/kube-rbac-proxy/0.log" Oct 02 12:24:05 crc kubenswrapper[4783]: I1002 12:24:05.583491 4783 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-nmstate_nmstate-metrics-fdff9cb8d-m8ljg_dd90f287-398b-473a-b421-57a85abdca9a/nmstate-metrics/0.log" Oct 02 12:24:05 crc kubenswrapper[4783]: I1002 12:24:05.760671 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-858ddd8f98-4wb7g_f6125a3c-e2a7-45aa-be54-af390730c09a/nmstate-operator/0.log" Oct 02 12:24:05 crc kubenswrapper[4783]: I1002 12:24:05.768127 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-6cdbc54649-f8h72_051f4c7b-ead6-4bd5-89d4-b64cf47f24d5/nmstate-webhook/0.log" Oct 02 12:24:19 crc kubenswrapper[4783]: I1002 12:24:19.740965 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-68d546b9d8-d47hb_c3dfee2e-30ff-42e9-b095-6315d8ab67d8/kube-rbac-proxy/0.log" Oct 02 12:24:19 crc kubenswrapper[4783]: I1002 12:24:19.842470 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-68d546b9d8-d47hb_c3dfee2e-30ff-42e9-b095-6315d8ab67d8/controller/0.log" Oct 02 12:24:19 crc kubenswrapper[4783]: I1002 12:24:19.927262 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lp8kh_d9b837d5-d442-41aa-b6be-2fea310d330c/cp-frr-files/0.log" Oct 02 12:24:20 crc kubenswrapper[4783]: I1002 12:24:20.151395 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lp8kh_d9b837d5-d442-41aa-b6be-2fea310d330c/cp-reloader/0.log" Oct 02 12:24:20 crc kubenswrapper[4783]: I1002 12:24:20.198529 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lp8kh_d9b837d5-d442-41aa-b6be-2fea310d330c/cp-metrics/0.log" Oct 02 12:24:20 crc kubenswrapper[4783]: I1002 12:24:20.220911 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lp8kh_d9b837d5-d442-41aa-b6be-2fea310d330c/cp-frr-files/0.log" Oct 02 12:24:20 crc kubenswrapper[4783]: I1002 12:24:20.227736 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lp8kh_d9b837d5-d442-41aa-b6be-2fea310d330c/cp-reloader/0.log" Oct 02 12:24:20 crc kubenswrapper[4783]: I1002 12:24:20.436949 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lp8kh_d9b837d5-d442-41aa-b6be-2fea310d330c/cp-frr-files/0.log" Oct 02 12:24:20 crc kubenswrapper[4783]: I1002 12:24:20.480591 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lp8kh_d9b837d5-d442-41aa-b6be-2fea310d330c/cp-reloader/0.log" Oct 02 12:24:20 crc kubenswrapper[4783]: I1002 12:24:20.487320 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lp8kh_d9b837d5-d442-41aa-b6be-2fea310d330c/cp-metrics/0.log" Oct 02 12:24:20 crc kubenswrapper[4783]: I1002 12:24:20.540888 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lp8kh_d9b837d5-d442-41aa-b6be-2fea310d330c/cp-metrics/0.log" Oct 02 12:24:20 crc kubenswrapper[4783]: I1002 12:24:20.725451 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lp8kh_d9b837d5-d442-41aa-b6be-2fea310d330c/cp-frr-files/0.log" Oct 02 12:24:20 crc kubenswrapper[4783]: I1002 12:24:20.732944 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lp8kh_d9b837d5-d442-41aa-b6be-2fea310d330c/cp-reloader/0.log" Oct 02 12:24:20 crc kubenswrapper[4783]: I1002 12:24:20.793704 4783 log.go:25] "Finished parsing log file" 
path="/var/log/pods/metallb-system_frr-k8s-lp8kh_d9b837d5-d442-41aa-b6be-2fea310d330c/controller/0.log" Oct 02 12:24:20 crc kubenswrapper[4783]: I1002 12:24:20.796866 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lp8kh_d9b837d5-d442-41aa-b6be-2fea310d330c/cp-metrics/0.log" Oct 02 12:24:21 crc kubenswrapper[4783]: I1002 12:24:21.014524 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lp8kh_d9b837d5-d442-41aa-b6be-2fea310d330c/kube-rbac-proxy-frr/0.log" Oct 02 12:24:21 crc kubenswrapper[4783]: I1002 12:24:21.036143 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lp8kh_d9b837d5-d442-41aa-b6be-2fea310d330c/frr-metrics/0.log" Oct 02 12:24:21 crc kubenswrapper[4783]: I1002 12:24:21.082670 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lp8kh_d9b837d5-d442-41aa-b6be-2fea310d330c/kube-rbac-proxy/0.log" Oct 02 12:24:21 crc kubenswrapper[4783]: I1002 12:24:21.317227 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lp8kh_d9b837d5-d442-41aa-b6be-2fea310d330c/reloader/0.log" Oct 02 12:24:21 crc kubenswrapper[4783]: I1002 12:24:21.446540 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-64bf5d555-g9k65_6d68c40b-106d-478a-ad63-90dc1bbaf434/frr-k8s-webhook-server/0.log" Oct 02 12:24:21 crc kubenswrapper[4783]: I1002 12:24:21.513116 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 02 12:24:21 crc kubenswrapper[4783]: I1002 12:24:21.513176 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 02 12:24:21 crc kubenswrapper[4783]: I1002 12:24:21.681065 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-c4c4dd5fd-sqv6n_624d75e0-0672-4797-8791-25096bfbf553/manager/0.log" Oct 02 12:24:21 crc kubenswrapper[4783]: I1002 12:24:21.900584 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-9d574b6b6-j97tc_fdb3e990-3206-4ed2-8df2-86695dadf11f/webhook-server/0.log" Oct 02 12:24:22 crc kubenswrapper[4783]: I1002 12:24:22.047637 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-x5j7x_8e0102e1-45d3-4a6f-a080-55c51c71d864/kube-rbac-proxy/0.log" Oct 02 12:24:22 crc kubenswrapper[4783]: I1002 12:24:22.394833 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-lp8kh_d9b837d5-d442-41aa-b6be-2fea310d330c/frr/0.log" Oct 02 12:24:22 crc kubenswrapper[4783]: I1002 12:24:22.547711 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-x5j7x_8e0102e1-45d3-4a6f-a080-55c51c71d864/speaker/0.log" Oct 02 12:24:34 crc kubenswrapper[4783]: I1002 12:24:34.326216 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26bw7m_6083ba68-92cc-4887-995a-f0aa6a582a48/util/0.log" 
Oct 02 12:24:34 crc kubenswrapper[4783]: I1002 12:24:34.554359 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26bw7m_6083ba68-92cc-4887-995a-f0aa6a582a48/pull/0.log" Oct 02 12:24:34 crc kubenswrapper[4783]: I1002 12:24:34.580287 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26bw7m_6083ba68-92cc-4887-995a-f0aa6a582a48/pull/0.log" Oct 02 12:24:34 crc kubenswrapper[4783]: I1002 12:24:34.618138 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26bw7m_6083ba68-92cc-4887-995a-f0aa6a582a48/util/0.log" Oct 02 12:24:34 crc kubenswrapper[4783]: I1002 12:24:34.760886 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26bw7m_6083ba68-92cc-4887-995a-f0aa6a582a48/pull/0.log" Oct 02 12:24:34 crc kubenswrapper[4783]: I1002 12:24:34.784474 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26bw7m_6083ba68-92cc-4887-995a-f0aa6a582a48/util/0.log" Oct 02 12:24:34 crc kubenswrapper[4783]: I1002 12:24:34.848604 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_8f2f4ee801e5826a37d84a7b1fc4ccbf6b79de668302737d0f1152d8d26bw7m_6083ba68-92cc-4887-995a-f0aa6a582a48/extract/0.log" Oct 02 12:24:34 crc kubenswrapper[4783]: I1002 12:24:34.955056 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-ttd82_29f73f22-c6f7-4508-a35a-6ff23efdaaf1/extract-utilities/0.log" Oct 02 12:24:35 crc kubenswrapper[4783]: I1002 12:24:35.100074 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-ttd82_29f73f22-c6f7-4508-a35a-6ff23efdaaf1/extract-utilities/0.log" Oct 02 12:24:35 crc kubenswrapper[4783]: I1002 12:24:35.124629 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-ttd82_29f73f22-c6f7-4508-a35a-6ff23efdaaf1/extract-content/0.log" Oct 02 12:24:35 crc kubenswrapper[4783]: I1002 12:24:35.130600 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-ttd82_29f73f22-c6f7-4508-a35a-6ff23efdaaf1/extract-content/0.log" Oct 02 12:24:35 crc kubenswrapper[4783]: I1002 12:24:35.294333 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-ttd82_29f73f22-c6f7-4508-a35a-6ff23efdaaf1/extract-utilities/0.log" Oct 02 12:24:35 crc kubenswrapper[4783]: I1002 12:24:35.359128 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-ttd82_29f73f22-c6f7-4508-a35a-6ff23efdaaf1/extract-content/0.log" Oct 02 12:24:35 crc kubenswrapper[4783]: I1002 12:24:35.561354 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-x657f_dac5b597-9c3c-414f-8f54-f36f8550d53d/extract-utilities/0.log" Oct 02 12:24:35 crc kubenswrapper[4783]: I1002 12:24:35.853876 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-x657f_dac5b597-9c3c-414f-8f54-f36f8550d53d/extract-content/0.log" Oct 02 12:24:35 crc kubenswrapper[4783]: I1002 12:24:35.927082 4783 log.go:25] "Finished 
parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-x657f_dac5b597-9c3c-414f-8f54-f36f8550d53d/extract-content/0.log" Oct 02 12:24:35 crc kubenswrapper[4783]: I1002 12:24:35.935299 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-x657f_dac5b597-9c3c-414f-8f54-f36f8550d53d/extract-utilities/0.log" Oct 02 12:24:36 crc kubenswrapper[4783]: I1002 12:24:36.048452 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-ttd82_29f73f22-c6f7-4508-a35a-6ff23efdaaf1/registry-server/0.log" Oct 02 12:24:36 crc kubenswrapper[4783]: I1002 12:24:36.179055 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-x657f_dac5b597-9c3c-414f-8f54-f36f8550d53d/extract-utilities/0.log" Oct 02 12:24:36 crc kubenswrapper[4783]: I1002 12:24:36.249692 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-x657f_dac5b597-9c3c-414f-8f54-f36f8550d53d/extract-content/0.log" Oct 02 12:24:36 crc kubenswrapper[4783]: I1002 12:24:36.552495 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c4hstx_09df6785-86ca-4bd5-958d-931c38c75084/util/0.log" Oct 02 12:24:36 crc kubenswrapper[4783]: I1002 12:24:36.712160 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c4hstx_09df6785-86ca-4bd5-958d-931c38c75084/pull/0.log" Oct 02 12:24:36 crc kubenswrapper[4783]: I1002 12:24:36.725458 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c4hstx_09df6785-86ca-4bd5-958d-931c38c75084/util/0.log" Oct 02 12:24:36 crc kubenswrapper[4783]: I1002 12:24:36.864864 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c4hstx_09df6785-86ca-4bd5-958d-931c38c75084/pull/0.log" Oct 02 12:24:36 crc kubenswrapper[4783]: I1002 12:24:36.894382 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-x657f_dac5b597-9c3c-414f-8f54-f36f8550d53d/registry-server/0.log" Oct 02 12:24:37 crc kubenswrapper[4783]: I1002 12:24:37.122713 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c4hstx_09df6785-86ca-4bd5-958d-931c38c75084/extract/0.log" Oct 02 12:24:37 crc kubenswrapper[4783]: I1002 12:24:37.134215 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c4hstx_09df6785-86ca-4bd5-958d-931c38c75084/pull/0.log" Oct 02 12:24:37 crc kubenswrapper[4783]: I1002 12:24:37.155763 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_fa9831ede5d93c33d525b70ce6ddf94e500d80992af75a3305fe98835c4hstx_09df6785-86ca-4bd5-958d-931c38c75084/util/0.log" Oct 02 12:24:37 crc kubenswrapper[4783]: I1002 12:24:37.287738 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-qxbct_6f14819a-a6a1-49ff-8e2a-ba7761c8a2be/marketplace-operator/0.log" Oct 02 12:24:37 crc kubenswrapper[4783]: I1002 12:24:37.350690 4783 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_redhat-marketplace-lcc7z_bcf29f25-4d45-44e2-aae1-32ec435ad6a6/extract-utilities/0.log" Oct 02 12:24:37 crc kubenswrapper[4783]: I1002 12:24:37.630384 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-lcc7z_bcf29f25-4d45-44e2-aae1-32ec435ad6a6/extract-content/0.log" Oct 02 12:24:37 crc kubenswrapper[4783]: I1002 12:24:37.635532 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-lcc7z_bcf29f25-4d45-44e2-aae1-32ec435ad6a6/extract-content/0.log" Oct 02 12:24:37 crc kubenswrapper[4783]: I1002 12:24:37.671651 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-lcc7z_bcf29f25-4d45-44e2-aae1-32ec435ad6a6/extract-utilities/0.log" Oct 02 12:24:37 crc kubenswrapper[4783]: I1002 12:24:37.868159 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-lcc7z_bcf29f25-4d45-44e2-aae1-32ec435ad6a6/extract-utilities/0.log" Oct 02 12:24:37 crc kubenswrapper[4783]: I1002 12:24:37.876465 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-lcc7z_bcf29f25-4d45-44e2-aae1-32ec435ad6a6/extract-content/0.log" Oct 02 12:24:38 crc kubenswrapper[4783]: I1002 12:24:38.057052 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-lcc7z_bcf29f25-4d45-44e2-aae1-32ec435ad6a6/registry-server/0.log" Oct 02 12:24:38 crc kubenswrapper[4783]: I1002 12:24:38.123255 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-xqljq_01532edf-0ac3-455f-8915-72314d7061c1/extract-utilities/0.log" Oct 02 12:24:38 crc kubenswrapper[4783]: I1002 12:24:38.332747 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-xqljq_01532edf-0ac3-455f-8915-72314d7061c1/extract-utilities/0.log" Oct 02 12:24:38 crc kubenswrapper[4783]: I1002 12:24:38.343699 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-xqljq_01532edf-0ac3-455f-8915-72314d7061c1/extract-content/0.log" Oct 02 12:24:38 crc kubenswrapper[4783]: I1002 12:24:38.369472 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-xqljq_01532edf-0ac3-455f-8915-72314d7061c1/extract-content/0.log" Oct 02 12:24:38 crc kubenswrapper[4783]: I1002 12:24:38.545159 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-xqljq_01532edf-0ac3-455f-8915-72314d7061c1/extract-utilities/0.log" Oct 02 12:24:38 crc kubenswrapper[4783]: I1002 12:24:38.570157 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-xqljq_01532edf-0ac3-455f-8915-72314d7061c1/extract-content/0.log" Oct 02 12:24:39 crc kubenswrapper[4783]: I1002 12:24:39.180507 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-xqljq_01532edf-0ac3-455f-8915-72314d7061c1/registry-server/0.log" Oct 02 12:24:51 crc kubenswrapper[4783]: I1002 12:24:51.513988 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 02 12:24:51 crc 
kubenswrapper[4783]: I1002 12:24:51.514793 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 02 12:25:21 crc kubenswrapper[4783]: I1002 12:25:21.513394 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 02 12:25:21 crc kubenswrapper[4783]: I1002 12:25:21.513971 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 02 12:25:21 crc kubenswrapper[4783]: I1002 12:25:21.514083 4783 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" Oct 02 12:25:21 crc kubenswrapper[4783]: I1002 12:25:21.514834 4783 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"670704b352d07cf874134954cd2644f7bff5d175ece897d3e20733251f4c874c"} pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 02 12:25:21 crc kubenswrapper[4783]: I1002 12:25:21.514898 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" containerID="cri-o://670704b352d07cf874134954cd2644f7bff5d175ece897d3e20733251f4c874c" gracePeriod=600 Oct 02 12:25:22 crc kubenswrapper[4783]: I1002 12:25:22.060273 4783 generic.go:334] "Generic (PLEG): container finished" podID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerID="670704b352d07cf874134954cd2644f7bff5d175ece897d3e20733251f4c874c" exitCode=0 Oct 02 12:25:22 crc kubenswrapper[4783]: I1002 12:25:22.060360 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" event={"ID":"3288cc82-59a8-408e-8b0e-b5255882b4fb","Type":"ContainerDied","Data":"670704b352d07cf874134954cd2644f7bff5d175ece897d3e20733251f4c874c"} Oct 02 12:25:22 crc kubenswrapper[4783]: I1002 12:25:22.060687 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" event={"ID":"3288cc82-59a8-408e-8b0e-b5255882b4fb","Type":"ContainerStarted","Data":"30c031e46fecfbb6918021b4ba04956a9544b5c18e204d9f2b1f43e736d7d009"} Oct 02 12:25:22 crc kubenswrapper[4783]: I1002 12:25:22.060718 4783 scope.go:117] "RemoveContainer" containerID="e3e4225ed328e5591d066522ee52a27735fed7d1b5cd0007ed03d88c986c058b" Oct 02 12:27:03 crc kubenswrapper[4783]: I1002 12:27:03.058303 4783 generic.go:334] "Generic (PLEG): container finished" podID="d11528d7-0229-43e7-90fe-cd6f19b61a2a" containerID="1fb607cf03fe7a8e098a6a306e2cd32833abde66196a494d581e8440725393e2" exitCode=0 Oct 02 12:27:03 crc 
kubenswrapper[4783]: I1002 12:27:03.058383 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-52dnv/must-gather-q5nb7" event={"ID":"d11528d7-0229-43e7-90fe-cd6f19b61a2a","Type":"ContainerDied","Data":"1fb607cf03fe7a8e098a6a306e2cd32833abde66196a494d581e8440725393e2"} Oct 02 12:27:03 crc kubenswrapper[4783]: I1002 12:27:03.059939 4783 scope.go:117] "RemoveContainer" containerID="1fb607cf03fe7a8e098a6a306e2cd32833abde66196a494d581e8440725393e2" Oct 02 12:27:03 crc kubenswrapper[4783]: I1002 12:27:03.756031 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-52dnv_must-gather-q5nb7_d11528d7-0229-43e7-90fe-cd6f19b61a2a/gather/0.log" Oct 02 12:27:12 crc kubenswrapper[4783]: I1002 12:27:12.079856 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-52dnv/must-gather-q5nb7"] Oct 02 12:27:12 crc kubenswrapper[4783]: I1002 12:27:12.082524 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-52dnv/must-gather-q5nb7" podUID="d11528d7-0229-43e7-90fe-cd6f19b61a2a" containerName="copy" containerID="cri-o://9e78455863d87355ee007a76a8522b2666bc84cc30d540092f839d488bbccdcc" gracePeriod=2 Oct 02 12:27:12 crc kubenswrapper[4783]: I1002 12:27:12.097044 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-52dnv/must-gather-q5nb7"] Oct 02 12:27:12 crc kubenswrapper[4783]: I1002 12:27:12.679638 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-52dnv_must-gather-q5nb7_d11528d7-0229-43e7-90fe-cd6f19b61a2a/copy/0.log" Oct 02 12:27:12 crc kubenswrapper[4783]: I1002 12:27:12.680848 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-52dnv/must-gather-q5nb7" Oct 02 12:27:12 crc kubenswrapper[4783]: I1002 12:27:12.746231 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kcxkc\" (UniqueName: \"kubernetes.io/projected/d11528d7-0229-43e7-90fe-cd6f19b61a2a-kube-api-access-kcxkc\") pod \"d11528d7-0229-43e7-90fe-cd6f19b61a2a\" (UID: \"d11528d7-0229-43e7-90fe-cd6f19b61a2a\") " Oct 02 12:27:12 crc kubenswrapper[4783]: I1002 12:27:12.747526 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/d11528d7-0229-43e7-90fe-cd6f19b61a2a-must-gather-output\") pod \"d11528d7-0229-43e7-90fe-cd6f19b61a2a\" (UID: \"d11528d7-0229-43e7-90fe-cd6f19b61a2a\") " Oct 02 12:27:12 crc kubenswrapper[4783]: I1002 12:27:12.754250 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d11528d7-0229-43e7-90fe-cd6f19b61a2a-kube-api-access-kcxkc" (OuterVolumeSpecName: "kube-api-access-kcxkc") pod "d11528d7-0229-43e7-90fe-cd6f19b61a2a" (UID: "d11528d7-0229-43e7-90fe-cd6f19b61a2a"). InnerVolumeSpecName "kube-api-access-kcxkc". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 12:27:12 crc kubenswrapper[4783]: I1002 12:27:12.849827 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kcxkc\" (UniqueName: \"kubernetes.io/projected/d11528d7-0229-43e7-90fe-cd6f19b61a2a-kube-api-access-kcxkc\") on node \"crc\" DevicePath \"\"" Oct 02 12:27:12 crc kubenswrapper[4783]: I1002 12:27:12.952531 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d11528d7-0229-43e7-90fe-cd6f19b61a2a-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "d11528d7-0229-43e7-90fe-cd6f19b61a2a" (UID: "d11528d7-0229-43e7-90fe-cd6f19b61a2a"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 12:27:12 crc kubenswrapper[4783]: I1002 12:27:12.959623 4783 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/d11528d7-0229-43e7-90fe-cd6f19b61a2a-must-gather-output\") on node \"crc\" DevicePath \"\"" Oct 02 12:27:13 crc kubenswrapper[4783]: I1002 12:27:13.179912 4783 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-52dnv_must-gather-q5nb7_d11528d7-0229-43e7-90fe-cd6f19b61a2a/copy/0.log" Oct 02 12:27:13 crc kubenswrapper[4783]: I1002 12:27:13.181032 4783 generic.go:334] "Generic (PLEG): container finished" podID="d11528d7-0229-43e7-90fe-cd6f19b61a2a" containerID="9e78455863d87355ee007a76a8522b2666bc84cc30d540092f839d488bbccdcc" exitCode=143 Oct 02 12:27:13 crc kubenswrapper[4783]: I1002 12:27:13.181084 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-52dnv/must-gather-q5nb7" Oct 02 12:27:13 crc kubenswrapper[4783]: I1002 12:27:13.181132 4783 scope.go:117] "RemoveContainer" containerID="9e78455863d87355ee007a76a8522b2666bc84cc30d540092f839d488bbccdcc" Oct 02 12:27:13 crc kubenswrapper[4783]: I1002 12:27:13.268578 4783 scope.go:117] "RemoveContainer" containerID="1fb607cf03fe7a8e098a6a306e2cd32833abde66196a494d581e8440725393e2" Oct 02 12:27:13 crc kubenswrapper[4783]: I1002 12:27:13.344494 4783 scope.go:117] "RemoveContainer" containerID="9e78455863d87355ee007a76a8522b2666bc84cc30d540092f839d488bbccdcc" Oct 02 12:27:13 crc kubenswrapper[4783]: E1002 12:27:13.346469 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9e78455863d87355ee007a76a8522b2666bc84cc30d540092f839d488bbccdcc\": container with ID starting with 9e78455863d87355ee007a76a8522b2666bc84cc30d540092f839d488bbccdcc not found: ID does not exist" containerID="9e78455863d87355ee007a76a8522b2666bc84cc30d540092f839d488bbccdcc" Oct 02 12:27:13 crc kubenswrapper[4783]: I1002 12:27:13.346511 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9e78455863d87355ee007a76a8522b2666bc84cc30d540092f839d488bbccdcc"} err="failed to get container status \"9e78455863d87355ee007a76a8522b2666bc84cc30d540092f839d488bbccdcc\": rpc error: code = NotFound desc = could not find container \"9e78455863d87355ee007a76a8522b2666bc84cc30d540092f839d488bbccdcc\": container with ID starting with 9e78455863d87355ee007a76a8522b2666bc84cc30d540092f839d488bbccdcc not found: ID does not exist" Oct 02 12:27:13 crc kubenswrapper[4783]: I1002 12:27:13.346540 4783 scope.go:117] "RemoveContainer" containerID="1fb607cf03fe7a8e098a6a306e2cd32833abde66196a494d581e8440725393e2" Oct 02 12:27:13 crc 
kubenswrapper[4783]: E1002 12:27:13.348824 4783 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1fb607cf03fe7a8e098a6a306e2cd32833abde66196a494d581e8440725393e2\": container with ID starting with 1fb607cf03fe7a8e098a6a306e2cd32833abde66196a494d581e8440725393e2 not found: ID does not exist" containerID="1fb607cf03fe7a8e098a6a306e2cd32833abde66196a494d581e8440725393e2" Oct 02 12:27:13 crc kubenswrapper[4783]: I1002 12:27:13.348866 4783 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1fb607cf03fe7a8e098a6a306e2cd32833abde66196a494d581e8440725393e2"} err="failed to get container status \"1fb607cf03fe7a8e098a6a306e2cd32833abde66196a494d581e8440725393e2\": rpc error: code = NotFound desc = could not find container \"1fb607cf03fe7a8e098a6a306e2cd32833abde66196a494d581e8440725393e2\": container with ID starting with 1fb607cf03fe7a8e098a6a306e2cd32833abde66196a494d581e8440725393e2 not found: ID does not exist" Oct 02 12:27:13 crc kubenswrapper[4783]: I1002 12:27:13.556910 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d11528d7-0229-43e7-90fe-cd6f19b61a2a" path="/var/lib/kubelet/pods/d11528d7-0229-43e7-90fe-cd6f19b61a2a/volumes" Oct 02 12:27:21 crc kubenswrapper[4783]: I1002 12:27:21.513913 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 02 12:27:21 crc kubenswrapper[4783]: I1002 12:27:21.515037 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 02 12:27:39 crc kubenswrapper[4783]: I1002 12:27:39.404211 4783 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-2gdvg"] Oct 02 12:27:39 crc kubenswrapper[4783]: E1002 12:27:39.405234 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8cb4cbd2-32f7-461f-8a01-47baf029d1a5" containerName="registry-server" Oct 02 12:27:39 crc kubenswrapper[4783]: I1002 12:27:39.405248 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="8cb4cbd2-32f7-461f-8a01-47baf029d1a5" containerName="registry-server" Oct 02 12:27:39 crc kubenswrapper[4783]: E1002 12:27:39.405273 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8cb4cbd2-32f7-461f-8a01-47baf029d1a5" containerName="extract-content" Oct 02 12:27:39 crc kubenswrapper[4783]: I1002 12:27:39.405279 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="8cb4cbd2-32f7-461f-8a01-47baf029d1a5" containerName="extract-content" Oct 02 12:27:39 crc kubenswrapper[4783]: E1002 12:27:39.405296 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8cb4cbd2-32f7-461f-8a01-47baf029d1a5" containerName="extract-utilities" Oct 02 12:27:39 crc kubenswrapper[4783]: I1002 12:27:39.405303 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="8cb4cbd2-32f7-461f-8a01-47baf029d1a5" containerName="extract-utilities" Oct 02 12:27:39 crc kubenswrapper[4783]: E1002 12:27:39.405314 4783 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="db15bda2-72c3-4ec5-ae17-92ff291af121" containerName="container-00" Oct 02 12:27:39 crc kubenswrapper[4783]: I1002 12:27:39.405319 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="db15bda2-72c3-4ec5-ae17-92ff291af121" containerName="container-00" Oct 02 12:27:39 crc kubenswrapper[4783]: E1002 12:27:39.405336 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d11528d7-0229-43e7-90fe-cd6f19b61a2a" containerName="gather" Oct 02 12:27:39 crc kubenswrapper[4783]: I1002 12:27:39.405341 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="d11528d7-0229-43e7-90fe-cd6f19b61a2a" containerName="gather" Oct 02 12:27:39 crc kubenswrapper[4783]: E1002 12:27:39.405357 4783 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d11528d7-0229-43e7-90fe-cd6f19b61a2a" containerName="copy" Oct 02 12:27:39 crc kubenswrapper[4783]: I1002 12:27:39.405362 4783 state_mem.go:107] "Deleted CPUSet assignment" podUID="d11528d7-0229-43e7-90fe-cd6f19b61a2a" containerName="copy" Oct 02 12:27:39 crc kubenswrapper[4783]: I1002 12:27:39.405542 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="8cb4cbd2-32f7-461f-8a01-47baf029d1a5" containerName="registry-server" Oct 02 12:27:39 crc kubenswrapper[4783]: I1002 12:27:39.405554 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="d11528d7-0229-43e7-90fe-cd6f19b61a2a" containerName="copy" Oct 02 12:27:39 crc kubenswrapper[4783]: I1002 12:27:39.405564 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="db15bda2-72c3-4ec5-ae17-92ff291af121" containerName="container-00" Oct 02 12:27:39 crc kubenswrapper[4783]: I1002 12:27:39.405574 4783 memory_manager.go:354] "RemoveStaleState removing state" podUID="d11528d7-0229-43e7-90fe-cd6f19b61a2a" containerName="gather" Oct 02 12:27:39 crc kubenswrapper[4783]: I1002 12:27:39.406919 4783 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-2gdvg" Oct 02 12:27:39 crc kubenswrapper[4783]: I1002 12:27:39.456562 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ztm92\" (UniqueName: \"kubernetes.io/projected/89640d51-4026-4348-8aab-511d378b6ad9-kube-api-access-ztm92\") pod \"certified-operators-2gdvg\" (UID: \"89640d51-4026-4348-8aab-511d378b6ad9\") " pod="openshift-marketplace/certified-operators-2gdvg" Oct 02 12:27:39 crc kubenswrapper[4783]: I1002 12:27:39.456702 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89640d51-4026-4348-8aab-511d378b6ad9-catalog-content\") pod \"certified-operators-2gdvg\" (UID: \"89640d51-4026-4348-8aab-511d378b6ad9\") " pod="openshift-marketplace/certified-operators-2gdvg" Oct 02 12:27:39 crc kubenswrapper[4783]: I1002 12:27:39.456783 4783 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89640d51-4026-4348-8aab-511d378b6ad9-utilities\") pod \"certified-operators-2gdvg\" (UID: \"89640d51-4026-4348-8aab-511d378b6ad9\") " pod="openshift-marketplace/certified-operators-2gdvg" Oct 02 12:27:39 crc kubenswrapper[4783]: I1002 12:27:39.481649 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2gdvg"] Oct 02 12:27:39 crc kubenswrapper[4783]: I1002 12:27:39.559912 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89640d51-4026-4348-8aab-511d378b6ad9-catalog-content\") pod \"certified-operators-2gdvg\" (UID: \"89640d51-4026-4348-8aab-511d378b6ad9\") " pod="openshift-marketplace/certified-operators-2gdvg" Oct 02 12:27:39 crc kubenswrapper[4783]: I1002 12:27:39.559975 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89640d51-4026-4348-8aab-511d378b6ad9-utilities\") pod \"certified-operators-2gdvg\" (UID: \"89640d51-4026-4348-8aab-511d378b6ad9\") " pod="openshift-marketplace/certified-operators-2gdvg" Oct 02 12:27:39 crc kubenswrapper[4783]: I1002 12:27:39.560181 4783 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ztm92\" (UniqueName: \"kubernetes.io/projected/89640d51-4026-4348-8aab-511d378b6ad9-kube-api-access-ztm92\") pod \"certified-operators-2gdvg\" (UID: \"89640d51-4026-4348-8aab-511d378b6ad9\") " pod="openshift-marketplace/certified-operators-2gdvg" Oct 02 12:27:39 crc kubenswrapper[4783]: I1002 12:27:39.561773 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89640d51-4026-4348-8aab-511d378b6ad9-utilities\") pod \"certified-operators-2gdvg\" (UID: \"89640d51-4026-4348-8aab-511d378b6ad9\") " pod="openshift-marketplace/certified-operators-2gdvg" Oct 02 12:27:39 crc kubenswrapper[4783]: I1002 12:27:39.561799 4783 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89640d51-4026-4348-8aab-511d378b6ad9-catalog-content\") pod \"certified-operators-2gdvg\" (UID: \"89640d51-4026-4348-8aab-511d378b6ad9\") " pod="openshift-marketplace/certified-operators-2gdvg" Oct 02 12:27:39 crc kubenswrapper[4783]: I1002 12:27:39.582482 4783 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-ztm92\" (UniqueName: \"kubernetes.io/projected/89640d51-4026-4348-8aab-511d378b6ad9-kube-api-access-ztm92\") pod \"certified-operators-2gdvg\" (UID: \"89640d51-4026-4348-8aab-511d378b6ad9\") " pod="openshift-marketplace/certified-operators-2gdvg" Oct 02 12:27:39 crc kubenswrapper[4783]: I1002 12:27:39.727565 4783 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2gdvg" Oct 02 12:27:40 crc kubenswrapper[4783]: I1002 12:27:40.343512 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2gdvg"] Oct 02 12:27:40 crc kubenswrapper[4783]: I1002 12:27:40.433653 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2gdvg" event={"ID":"89640d51-4026-4348-8aab-511d378b6ad9","Type":"ContainerStarted","Data":"72ec94f83f87731c4ecba129d04af0ccd31b7ac904ad8ec1995ce20e2bd1d2d5"} Oct 02 12:27:41 crc kubenswrapper[4783]: I1002 12:27:41.448654 4783 generic.go:334] "Generic (PLEG): container finished" podID="89640d51-4026-4348-8aab-511d378b6ad9" containerID="62e3e6fad497dfc1159310386eca3d66865cdbc505a1720980021c22ad35f681" exitCode=0 Oct 02 12:27:41 crc kubenswrapper[4783]: I1002 12:27:41.448757 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2gdvg" event={"ID":"89640d51-4026-4348-8aab-511d378b6ad9","Type":"ContainerDied","Data":"62e3e6fad497dfc1159310386eca3d66865cdbc505a1720980021c22ad35f681"} Oct 02 12:27:41 crc kubenswrapper[4783]: I1002 12:27:41.453306 4783 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Oct 02 12:27:48 crc kubenswrapper[4783]: I1002 12:27:48.537934 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2gdvg" event={"ID":"89640d51-4026-4348-8aab-511d378b6ad9","Type":"ContainerStarted","Data":"430fe71f70529b8b279754868f868c419992af76587252eb352fc5dd697ab3d1"} Oct 02 12:27:49 crc kubenswrapper[4783]: I1002 12:27:49.556591 4783 generic.go:334] "Generic (PLEG): container finished" podID="89640d51-4026-4348-8aab-511d378b6ad9" containerID="430fe71f70529b8b279754868f868c419992af76587252eb352fc5dd697ab3d1" exitCode=0 Oct 02 12:27:49 crc kubenswrapper[4783]: I1002 12:27:49.560332 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2gdvg" event={"ID":"89640d51-4026-4348-8aab-511d378b6ad9","Type":"ContainerDied","Data":"430fe71f70529b8b279754868f868c419992af76587252eb352fc5dd697ab3d1"} Oct 02 12:27:51 crc kubenswrapper[4783]: I1002 12:27:51.513355 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 02 12:27:51 crc kubenswrapper[4783]: I1002 12:27:51.514051 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 02 12:27:52 crc kubenswrapper[4783]: I1002 12:27:52.586257 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/certified-operators-2gdvg" event={"ID":"89640d51-4026-4348-8aab-511d378b6ad9","Type":"ContainerStarted","Data":"97fd46cfd163f96ef64d0f101b9a462a85a63921adb407a4413e241b7a841ec6"} Oct 02 12:27:52 crc kubenswrapper[4783]: I1002 12:27:52.610842 4783 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-2gdvg" podStartSLOduration=4.234077643 podStartE2EDuration="13.610820106s" podCreationTimestamp="2025-10-02 12:27:39 +0000 UTC" firstStartedPulling="2025-10-02 12:27:41.450641311 +0000 UTC m=+5694.766835572" lastFinishedPulling="2025-10-02 12:27:50.827383774 +0000 UTC m=+5704.143578035" observedRunningTime="2025-10-02 12:27:52.600650829 +0000 UTC m=+5705.916845100" watchObservedRunningTime="2025-10-02 12:27:52.610820106 +0000 UTC m=+5705.927014377" Oct 02 12:27:59 crc kubenswrapper[4783]: I1002 12:27:59.727963 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-2gdvg" Oct 02 12:27:59 crc kubenswrapper[4783]: I1002 12:27:59.728646 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-2gdvg" Oct 02 12:27:59 crc kubenswrapper[4783]: I1002 12:27:59.787955 4783 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-2gdvg" Oct 02 12:28:00 crc kubenswrapper[4783]: I1002 12:28:00.709112 4783 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-2gdvg" Oct 02 12:28:00 crc kubenswrapper[4783]: I1002 12:28:00.815750 4783 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2gdvg"] Oct 02 12:28:00 crc kubenswrapper[4783]: I1002 12:28:00.894371 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ttd82"] Oct 02 12:28:00 crc kubenswrapper[4783]: I1002 12:28:00.894911 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-ttd82" podUID="29f73f22-c6f7-4508-a35a-6ff23efdaaf1" containerName="registry-server" containerID="cri-o://cea9678e16c6bd0838125cd3161b8616f3b4c267f840d30ae963e6f75ebac979" gracePeriod=2 Oct 02 12:28:01 crc kubenswrapper[4783]: I1002 12:28:01.673909 4783 generic.go:334] "Generic (PLEG): container finished" podID="29f73f22-c6f7-4508-a35a-6ff23efdaaf1" containerID="cea9678e16c6bd0838125cd3161b8616f3b4c267f840d30ae963e6f75ebac979" exitCode=0 Oct 02 12:28:01 crc kubenswrapper[4783]: I1002 12:28:01.673987 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ttd82" event={"ID":"29f73f22-c6f7-4508-a35a-6ff23efdaaf1","Type":"ContainerDied","Data":"cea9678e16c6bd0838125cd3161b8616f3b4c267f840d30ae963e6f75ebac979"} Oct 02 12:28:02 crc kubenswrapper[4783]: I1002 12:28:02.010034 4783 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-ttd82" Oct 02 12:28:02 crc kubenswrapper[4783]: I1002 12:28:02.136984 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29f73f22-c6f7-4508-a35a-6ff23efdaaf1-catalog-content\") pod \"29f73f22-c6f7-4508-a35a-6ff23efdaaf1\" (UID: \"29f73f22-c6f7-4508-a35a-6ff23efdaaf1\") " Oct 02 12:28:02 crc kubenswrapper[4783]: I1002 12:28:02.137092 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29f73f22-c6f7-4508-a35a-6ff23efdaaf1-utilities\") pod \"29f73f22-c6f7-4508-a35a-6ff23efdaaf1\" (UID: \"29f73f22-c6f7-4508-a35a-6ff23efdaaf1\") " Oct 02 12:28:02 crc kubenswrapper[4783]: I1002 12:28:02.137151 4783 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rwrn4\" (UniqueName: \"kubernetes.io/projected/29f73f22-c6f7-4508-a35a-6ff23efdaaf1-kube-api-access-rwrn4\") pod \"29f73f22-c6f7-4508-a35a-6ff23efdaaf1\" (UID: \"29f73f22-c6f7-4508-a35a-6ff23efdaaf1\") " Oct 02 12:28:02 crc kubenswrapper[4783]: I1002 12:28:02.151862 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/29f73f22-c6f7-4508-a35a-6ff23efdaaf1-utilities" (OuterVolumeSpecName: "utilities") pod "29f73f22-c6f7-4508-a35a-6ff23efdaaf1" (UID: "29f73f22-c6f7-4508-a35a-6ff23efdaaf1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 12:28:02 crc kubenswrapper[4783]: I1002 12:28:02.157076 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/29f73f22-c6f7-4508-a35a-6ff23efdaaf1-kube-api-access-rwrn4" (OuterVolumeSpecName: "kube-api-access-rwrn4") pod "29f73f22-c6f7-4508-a35a-6ff23efdaaf1" (UID: "29f73f22-c6f7-4508-a35a-6ff23efdaaf1"). InnerVolumeSpecName "kube-api-access-rwrn4". PluginName "kubernetes.io/projected", VolumeGidValue "" Oct 02 12:28:02 crc kubenswrapper[4783]: I1002 12:28:02.216598 4783 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/29f73f22-c6f7-4508-a35a-6ff23efdaaf1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "29f73f22-c6f7-4508-a35a-6ff23efdaaf1" (UID: "29f73f22-c6f7-4508-a35a-6ff23efdaaf1"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Oct 02 12:28:02 crc kubenswrapper[4783]: I1002 12:28:02.239666 4783 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/29f73f22-c6f7-4508-a35a-6ff23efdaaf1-catalog-content\") on node \"crc\" DevicePath \"\"" Oct 02 12:28:02 crc kubenswrapper[4783]: I1002 12:28:02.239708 4783 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/29f73f22-c6f7-4508-a35a-6ff23efdaaf1-utilities\") on node \"crc\" DevicePath \"\"" Oct 02 12:28:02 crc kubenswrapper[4783]: I1002 12:28:02.239721 4783 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rwrn4\" (UniqueName: \"kubernetes.io/projected/29f73f22-c6f7-4508-a35a-6ff23efdaaf1-kube-api-access-rwrn4\") on node \"crc\" DevicePath \"\"" Oct 02 12:28:02 crc kubenswrapper[4783]: I1002 12:28:02.701561 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ttd82" event={"ID":"29f73f22-c6f7-4508-a35a-6ff23efdaaf1","Type":"ContainerDied","Data":"8bdf8043c5e0e2af7ec0e38777468f7d4adb7e23cb08acc270a3dca749fcef37"} Oct 02 12:28:02 crc kubenswrapper[4783]: I1002 12:28:02.701609 4783 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ttd82" Oct 02 12:28:02 crc kubenswrapper[4783]: I1002 12:28:02.701623 4783 scope.go:117] "RemoveContainer" containerID="cea9678e16c6bd0838125cd3161b8616f3b4c267f840d30ae963e6f75ebac979" Oct 02 12:28:02 crc kubenswrapper[4783]: I1002 12:28:02.742225 4783 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ttd82"] Oct 02 12:28:02 crc kubenswrapper[4783]: I1002 12:28:02.745553 4783 scope.go:117] "RemoveContainer" containerID="593e0cb4ec9dc61f7e8becdf621fddb468a50c0effdbd9b738de21433a1a2835" Oct 02 12:28:02 crc kubenswrapper[4783]: I1002 12:28:02.749364 4783 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-ttd82"] Oct 02 12:28:02 crc kubenswrapper[4783]: I1002 12:28:02.879779 4783 scope.go:117] "RemoveContainer" containerID="f718d554e1568c2125d257dedebc38d4186dbf2164a01a097da985089b28032e" Oct 02 12:28:03 crc kubenswrapper[4783]: I1002 12:28:03.556177 4783 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="29f73f22-c6f7-4508-a35a-6ff23efdaaf1" path="/var/lib/kubelet/pods/29f73f22-c6f7-4508-a35a-6ff23efdaaf1/volumes" Oct 02 12:28:21 crc kubenswrapper[4783]: I1002 12:28:21.513434 4783 patch_prober.go:28] interesting pod/machine-config-daemon-2j8rt container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Oct 02 12:28:21 crc kubenswrapper[4783]: I1002 12:28:21.514172 4783 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Oct 02 12:28:21 crc kubenswrapper[4783]: I1002 12:28:21.514226 4783 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" Oct 02 12:28:21 crc kubenswrapper[4783]: I1002 12:28:21.515028 4783 
kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"30c031e46fecfbb6918021b4ba04956a9544b5c18e204d9f2b1f43e736d7d009"} pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Oct 02 12:28:21 crc kubenswrapper[4783]: I1002 12:28:21.515074 4783 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerName="machine-config-daemon" containerID="cri-o://30c031e46fecfbb6918021b4ba04956a9544b5c18e204d9f2b1f43e736d7d009" gracePeriod=600 Oct 02 12:28:21 crc kubenswrapper[4783]: E1002 12:28:21.639466 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 12:28:21 crc kubenswrapper[4783]: I1002 12:28:21.931693 4783 generic.go:334] "Generic (PLEG): container finished" podID="3288cc82-59a8-408e-8b0e-b5255882b4fb" containerID="30c031e46fecfbb6918021b4ba04956a9544b5c18e204d9f2b1f43e736d7d009" exitCode=0 Oct 02 12:28:21 crc kubenswrapper[4783]: I1002 12:28:21.931790 4783 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" event={"ID":"3288cc82-59a8-408e-8b0e-b5255882b4fb","Type":"ContainerDied","Data":"30c031e46fecfbb6918021b4ba04956a9544b5c18e204d9f2b1f43e736d7d009"} Oct 02 12:28:21 crc kubenswrapper[4783]: I1002 12:28:21.932107 4783 scope.go:117] "RemoveContainer" containerID="670704b352d07cf874134954cd2644f7bff5d175ece897d3e20733251f4c874c" Oct 02 12:28:21 crc kubenswrapper[4783]: I1002 12:28:21.932899 4783 scope.go:117] "RemoveContainer" containerID="30c031e46fecfbb6918021b4ba04956a9544b5c18e204d9f2b1f43e736d7d009" Oct 02 12:28:21 crc kubenswrapper[4783]: E1002 12:28:21.933210 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 12:28:36 crc kubenswrapper[4783]: I1002 12:28:36.547278 4783 scope.go:117] "RemoveContainer" containerID="30c031e46fecfbb6918021b4ba04956a9544b5c18e204d9f2b1f43e736d7d009" Oct 02 12:28:36 crc kubenswrapper[4783]: E1002 12:28:36.548054 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb" Oct 02 12:28:49 crc kubenswrapper[4783]: I1002 12:28:49.545644 4783 scope.go:117] "RemoveContainer" containerID="30c031e46fecfbb6918021b4ba04956a9544b5c18e204d9f2b1f43e736d7d009" Oct 02 
Oct 02 12:28:49 crc kubenswrapper[4783]: E1002 12:28:49.546551 4783 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-2j8rt_openshift-machine-config-operator(3288cc82-59a8-408e-8b0e-b5255882b4fb)\"" pod="openshift-machine-config-operator/machine-config-daemon-2j8rt" podUID="3288cc82-59a8-408e-8b0e-b5255882b4fb"